Dataset columns (Hugging Face dataset viewer summary):
  index              int64          0 to 0
  repo_id            stringclasses  179 values
  file_path          stringlengths  26 to 186
  content            stringlengths  1 to 2.1M
  __index_level_0__  int64          0 to 9
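A minimal sketch of how rows with this schema could be inspected, assuming the columns above describe a Hugging Face `datasets` table (the `content` strings below are truncated placeholders, not the real SVG markup; the real dataset name is not given here, so a small in-memory table is built instead):

from datasets import Dataset

rows = {
    "index": [0, 0],
    "repo_id": ["hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1"] * 2,
    "file_path": [
        "hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/pytorch_vs_tf_oob_big_batch.svg",
        "hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/batch_scaling_exp.svg",
    ],
    "content": ["<svg ...>", "<svg ...>"],  # placeholders; real rows hold full SVG markup
    "__index_level_0__": [0, 0],
}
ds = Dataset.from_dict(rows)

# Keep only the SVG assets belonging to this blog post's asset folder.
svgs = ds.filter(
    lambda ex: ex["repo_id"].endswith("21_bert_cpu_scaling_part_1")
    and ex["file_path"].endswith(".svg")
)
for ex in svgs:
    print(ex["file_path"], f"({len(ex['content'])} chars)")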
Row 0:
  index: 0
  repo_id: hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
  file_path: hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/pytorch_vs_tf_oob_big_batch.svg
  content: [SVG bar chart, 4310 x 2426 px. Title: "PyTorch & TensorFlow Eager - Latency measurement for big batch size and default threading settings". Y-axis: Latency (ms), 0 to 45000. X-axis: Sequence Length grouped by Batch Size. Legend: pytorch (dark red), tensorflow (orange). Bar labels:

    Batch Size  Sequence Length  pytorch (ms)  tensorflow (ms)
    16          20               157           378
    16          32               153           401
    16          128              182           701
    16          384              1106          3417
    16          512              716           5747
    32          20               151           426
    32          32               161           493
    32          128              376           1518
    32          384              820           6905
    32          512              1222          11800
    128         20               227           855
    128         32               328           1434
    128         128              1403          6513
    128         384              9977          22800
    128         512              14966         41311
  ]
  __index_level_0__: 0
Row 1:
  index: 0
  repo_id: hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
  file_path: hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/batch_scaling_exp.svg
  content: [SVG bar chart, 4296 x 2447 px, using the same orange-gradient bar styling as the figure above; the markup is cut off before any title, axis labels, or data labels appear.]
stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip25"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="2908" y1="2040.5" x2="2950" y2="2040.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill26"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip27"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="3094" y1="1640.5" x2="3135" y2="1640.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill28"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip29"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="3279" y1="1926" x2="3321" y2="1926" gradientUnits="userSpaceOnUse" spreadMethod="reflect" 
id="fill30"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip31"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="3465" y1="1945.5" x2="3507" y2="1945.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill32"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip33"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="3651" y1="1978" x2="3692" y2="1978" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill34"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" 
stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip35"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="3836" y1="1368" x2="3878" y2="1368" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill36"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip37"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="4022" y1="1836.5" x2="4063" y2="1836.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill38"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" 
stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip39"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="4207" y1="1931" x2="4249" y2="1931" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill40"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip41"><rect x="767" y="462" width="3716" height="1669"/></clipPath><linearGradient x1="4393" y1="1968" x2="4435" y2="1968" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill42"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" 
stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip43"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip44"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip45"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip46"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip47"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip48"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip49"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip50"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip51"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip52"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip53"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip54"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip55"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip56"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip57"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip58"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip59"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip60"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip61"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip62"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip63"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip64"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip65"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip66"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip67"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip68"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip69"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip70"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip71"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip72"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip73"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip74"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip75"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip76"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip77"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip78"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip79"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip80"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip81"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath 
id="clip82"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip83"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip84"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip85"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip86"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip87"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip88"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip89"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip90"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip91"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip92"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip93"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip94"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip95"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip96"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip97"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip98"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip99"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip100"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip101"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip102"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip103"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip104"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip105"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip106"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip107"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip108"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip109"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip110"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip111"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip112"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip113"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip114"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip115"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip116"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip117"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip118"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip119"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip120"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip121"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip122"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip123"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip124"><rect x="519" y="292" 
width="4293" height="2444"/></clipPath><clipPath id="clip125"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip126"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip127"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip128"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip129"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip130"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip131"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip132"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip133"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip134"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip135"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip136"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip137"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip138"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip139"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip140"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip141"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip142"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip143"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip144"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip145"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip146"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip147"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip148"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip149"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip150"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip151"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip152"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip153"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip154"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip155"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip156"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip157"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip158"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip159"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip160"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip161"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip162"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip163"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip164"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip165"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip166"><rect x="519" y="292" width="4293" 
height="2444"/></clipPath><clipPath id="clip167"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip168"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip169"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip170"><rect x="519" y="292" width="4293" height="2444"/></clipPath><clipPath id="clip171"><rect x="519" y="292" width="4293" height="2444"/></clipPath><linearGradient x1="4552" y1="1612.5" x2="4575" y2="1612.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill172"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip173"><rect x="519" y="292" width="4293" height="2444"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-518 -291)"><rect x="519" y="292" width="4293" height="2444" fill="#FFFFFF"/><g clip-path="url(#clip1)"><path d="M767.5 1890.56 4480.5 1890.56M767.5 1652.55 4480.5 1652.55M767.5 1414.55 4480.5 1414.55M767.5 1176.54 4480.5 1176.54M767.5 938.531 4480.5 938.531M767.5 700.523 4480.5 700.523M767.5 462.5 4480.5 462.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip2)"><path d="M813.027 2111.07 855.028 2111.07 855.028 2128 813.027 2128ZM999.033 2118.07 1041.03 2118.07 1041.03 2128 999.033 2128ZM1185.04 2117.07 1226.04 2117.07 1226.04 2128 1185.04 2128ZM1370.04 2115.07 1412.05 2115.07 1412.05 2128 1370.04 2128ZM1556.05 2112.07 1598.05 2112.07 1598.05 2128 1556.05 2128ZM1741.06 2116.07 1783.06 2116.07 1783.06 2128 1741.06 2128ZM1927.06 2116.07 1969.06 2116.07 1969.06 2128 1927.06 2128ZM2113.07 2114.07 2154.07 2114.07 2154.07 2128 2113.07 2128ZM2298.08 2089.07 2340.08 2089.07 2340.08 2128 2298.08 2128ZM2484.08 2100.07 2526.08 2100.07 2526.08 2128 2484.08 2128ZM2670.09 2104.07 2711.09 2104.07 2711.09 2128 2670.09 2128ZM2855.09 2103.07 2897.1 2103.07 2897.1 2128 2855.09 2128ZM3041.1 1884.06 3082.1 1884.06 3082.1 2128 3041.1 2128ZM3226.11 2044.07 3268.11 2044.07 3268.11 2128 3226.11 2128ZM3412.11 2057.07 3454.11 2057.07 3454.11 2128 3412.11 2128ZM3598.12 2053.07 3639.12 2053.07 3639.12 2128 3598.12 2128ZM3783.12 1731.06 3825.13 1731.06 3825.13 2128 3783.12 2128ZM3969.13 
1954.06 4010.13 1954.06 4010.13 2128 3969.13 2128ZM4154.14 2023.07 4196.14 2023.07 4196.14 2128 4154.14 2128ZM4340.14 2014.07 4382.14 2014.07 4382.14 2128 4340.14 2128Z" fill="#C00000"/></g><g clip-path="url(#clip3)"><rect x="866" y="1984" width="41.9999" height="144" fill="url(#fill4)"/></g><g clip-path="url(#clip5)"><rect x="1052" y="2054" width="42.0002" height="74.0002" fill="url(#fill6)"/></g><g clip-path="url(#clip7)"><rect x="1238" y="2060" width="41.0002" height="68" fill="url(#fill8)"/></g><g clip-path="url(#clip9)"><rect x="1423" y="2059" width="41.9999" height="69" fill="url(#fill10)"/></g><g clip-path="url(#clip11)"><rect x="1609" y="1978" width="40.9999" height="150" fill="url(#fill12)"/></g><g clip-path="url(#clip13)"><rect x="1794" y="2026" width="41.9999" height="102" fill="url(#fill14)"/></g><g clip-path="url(#clip15)"><rect x="1980" y="2042" width="41.9999" height="86.0004" fill="url(#fill16)"/></g><g clip-path="url(#clip17)"><rect x="2166" y="2058" width="41" height="70.0002" fill="url(#fill18)"/></g><g clip-path="url(#clip19)"><rect x="2351" y="1863" width="42" height="265" fill="url(#fill20)"/></g><g clip-path="url(#clip21)"><rect x="2537" y="1950" width="41.0002" height="178" fill="url(#fill22)"/></g><g clip-path="url(#clip23)"><rect x="2722" y="1957" width="42" height="171" fill="url(#fill24)"/></g><g clip-path="url(#clip25)"><rect x="2908" y="1953" width="42" height="175" fill="url(#fill26)"/></g><g clip-path="url(#clip27)"><rect x="3094" y="1153" width="41" height="975" fill="url(#fill28)"/></g><g clip-path="url(#clip29)"><rect x="3279" y="1724" width="42" height="404" fill="url(#fill30)"/></g><g clip-path="url(#clip31)"><rect x="3465" y="1763" width="42" height="365" fill="url(#fill32)"/></g><g clip-path="url(#clip33)"><rect x="3651" y="1828" width="41" height="300" fill="url(#fill34)"/></g><g clip-path="url(#clip35)"><rect x="3836" y="608" width="42" height="1520" fill="url(#fill36)"/></g><g clip-path="url(#clip37)"><rect x="4022" y="1545" width="41.0002" height="583" fill="url(#fill38)"/></g><g clip-path="url(#clip39)"><rect x="4207" y="1734" width="42" height="394" fill="url(#fill40)"/></g><g clip-path="url(#clip41)"><rect x="4393" y="1808" width="42" height="320" fill="url(#fill42)"/></g><g clip-path="url(#clip43)"><path d="M767.5 2128.5 4480.5 2128.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip44)"><path d="M767.5 2128.5 767.5 2208.5M953.531 2128.5 953.531 2208.5M1139.54 2128.5 1139.54 2208.5M1324.54 2128.5 1324.54 2208.5M1510.55 2128.5 1510.55 2208.5M1695.56 2128.5 1695.56 2208.5M1881.56 2128.5 1881.56 2208.5M2067.57 2128.5 2067.57 2208.5M2252.57 2128.5 2252.57 2208.5M2438.58 2128.5 2438.58 2208.5M2623.59 2128.5 2623.59 2208.5M2809.59 2128.5 2809.59 2208.5M2995.6 2128.5 2995.6 2208.5M3180.6 2128.5 3180.6 2208.5M3366.61 2128.5 3366.61 2208.5M3551.62 2128.5 3551.62 2208.5M3737.62 2128.5 3737.62 2208.5M3923.63 2128.5 3923.63 2208.5M4108.63 2128.5 4108.63 2208.5M4294.64 2128.5 4294.64 2208.5M4480.5 2128.5 4480.5 2208.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip45)"><path d="M767.5 2208.5 767.5 2288.5M953.531 2208.5 953.531 2288.5M1139.54 2208.5 1139.54 2288.5M1324.54 2208.5 1324.54 2288.5M1510.55 2208.5 1510.55 2288.5M1695.56 2208.5 1695.56 2288.5M1881.56 2208.5 1881.56 2288.5M2067.57 2208.5 2067.57 2288.5M2252.57 2208.5 2252.57 2288.5M2438.58 2208.5 2438.58 2288.5M2623.59 
2208.5 2623.59 2288.5M2809.59 2208.5 2809.59 2288.5M2995.6 2208.5 2995.6 2288.5M3180.6 2208.5 3180.6 2288.5M3366.61 2208.5 3366.61 2288.5M3551.62 2208.5 3551.62 2288.5M3737.62 2208.5 3737.62 2288.5M3923.63 2208.5 3923.63 2288.5M4108.63 2208.5 4108.63 2288.5M4294.64 2208.5 4294.64 2288.5M4480.5 2208.5 4480.5 2288.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip46)"><path d="M767.5 2288.5 767.5 2368.5M953.531 2288.5 953.531 2368.5M1139.54 2288.5 1139.54 2368.5M1324.54 2288.5 1324.54 2368.5M1510.55 2288.5 1510.55 2368.5M1695.56 2288.5 1695.56 2368.5M1881.56 2288.5 1881.56 2368.5M2067.57 2288.5 2067.57 2368.5M2252.57 2288.5 2252.57 2368.5M2438.58 2288.5 2438.58 2368.5M2623.59 2288.5 2623.59 2368.5M2809.59 2288.5 2809.59 2368.5M2995.6 2288.5 2995.6 2368.5M3180.6 2288.5 3180.6 2368.5M3366.61 2288.5 3366.61 2368.5M3551.62 2288.5 3551.62 2368.5M3737.62 2288.5 3737.62 2368.5M3923.63 2288.5 3923.63 2368.5M4108.63 2288.5 4108.63 2368.5M4294.64 2288.5 4294.64 2368.5M4480.5 2288.5 4480.5 2368.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip47)"><path d="M767.5 2368.5 767.5 2448.5M1510.55 2368.5 1510.55 2448.5M2252.57 2368.5 2252.57 2448.5M2995.6 2368.5 2995.6 2448.5M3737.62 2368.5 3737.62 2448.5M4480.5 2368.5 4480.5 2448.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip48)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 813.39 2079)">37</text></g><g clip-path="url(#clip49)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 999.004 2086)">22</text></g><g clip-path="url(#clip50)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1184.62 2085)">23</text></g><g clip-path="url(#clip51)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1370.23 2083)">28</text></g><g clip-path="url(#clip52)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1555.84 2080)">34</text></g><g clip-path="url(#clip53)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1741.46 2084)">27</text></g><g clip-path="url(#clip54)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1927.07 2084)">27</text></g><g clip-path="url(#clip55)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2112.68 2082)">31</text></g><g clip-path="url(#clip56)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2298.3 2057)">83</text></g><g clip-path="url(#clip57)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2483.91 2068)">60</text></g><g clip-path="url(#clip58)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" 
transform="matrix(1 0 0 1 2669.52 2072)">51</text></g><g clip-path="url(#clip59)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2855.14 2071)">53</text></g><g clip-path="url(#clip60)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3030.3 1852)">514</text></g><g clip-path="url(#clip61)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3215.91 2012)">177</text></g><g clip-path="url(#clip62)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3401.53 2025)">151</text></g><g clip-path="url(#clip63)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3587.14 2021)">159</text></g><g clip-path="url(#clip64)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3772.75 1699)">835</text></g><g clip-path="url(#clip65)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3958.37 1923)">365</text></g><g clip-path="url(#clip66)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4143.98 1991)">221</text></g><g clip-path="url(#clip67)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4329.59 1982)">241</text></g><g clip-path="url(#clip68)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 855.795 1952)">303</text></g><g clip-path="url(#clip69)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1041.41 2022)">156</text></g><g clip-path="url(#clip70)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1227.02 2028)">144</text></g><g clip-path="url(#clip71)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1412.63 2027)">146</text></g><g clip-path="url(#clip72)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1598.25 1946)">315</text></g><g clip-path="url(#clip73)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1783.86 1994)">215</text></g><g clip-path="url(#clip74)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1969.47 2010)">181</text></g><g clip-path="url(#clip75)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2155.09 2026)">149</text></g><g clip-path="url(#clip76)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2340.7 1831)">557</text></g><g clip-path="url(#clip77)"><text 
fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2526.31 1918)">376</text></g><g clip-path="url(#clip78)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2711.93 1925)">359</text></g><g clip-path="url(#clip79)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2897.54 1921)">368</text></g><g clip-path="url(#clip80)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3072.7 1121)">2049</text></g><g clip-path="url(#clip81)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3268.77 1692)">849</text></g><g clip-path="url(#clip82)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3454.38 1731)">769</text></g><g clip-path="url(#clip83)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3639.99 1796)">631</text></g><g clip-path="url(#clip84)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3815.16 576)">3194</text></g><g clip-path="url(#clip85)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4000.77 1513)">1225</text></g><g clip-path="url(#clip86)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4196.83 1702)">828</text></g><g clip-path="url(#clip87)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4382.45 1776)">674</text></g><g clip-path="url(#clip88)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 708.744 2140)">0</text></g><g clip-path="url(#clip89)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 666.944 1902)">500</text></g><g clip-path="url(#clip90)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 646.044 1664)">1000</text></g><g clip-path="url(#clip91)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 646.044 1426)">1500</text></g><g clip-path="url(#clip92)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 646.044 1188)">2000</text></g><g clip-path="url(#clip93)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 646.044 950)">2500</text></g><g clip-path="url(#clip94)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 646.044 712)">3000</text></g><g clip-path="url(#clip95)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" 
font-size="41" transform="matrix(1 0 0 1 646.044 474)">3500</text></g><g clip-path="url(#clip96)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 839.817 2194)">48</text></g><g clip-path="url(#clip97)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1025.43 2194)">24</text></g><g clip-path="url(#clip98)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1211.04 2194)">12</text></g><g clip-path="url(#clip99)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1407.11 2194)">6</text></g><g clip-path="url(#clip100)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1582.27 2194)">48</text></g><g clip-path="url(#clip101)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1767.88 2194)">24</text></g><g clip-path="url(#clip102)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1953.5 2194)">12</text></g><g clip-path="url(#clip103)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2149.56 2194)">6</text></g><g clip-path="url(#clip104)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2324.72 2194)">48</text></g><g clip-path="url(#clip105)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2510.34 2194)">24</text></g><g clip-path="url(#clip106)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2695.95 2194)">12</text></g><g clip-path="url(#clip107)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2892.01 2194)">6</text></g><g clip-path="url(#clip108)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3067.18 2194)">48</text></g><g clip-path="url(#clip109)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3252.79 2194)">24</text></g><g clip-path="url(#clip110)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3438.4 2194)">12</text></g><g clip-path="url(#clip111)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3634.47 2194)">6</text></g><g clip-path="url(#clip112)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3809.63 2194)">48</text></g><g clip-path="url(#clip113)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3995.24 2194)">24</text></g><g clip-path="url(#clip114)"><text 
fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4180.86 2194)">12</text></g><g clip-path="url(#clip115)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4376.92 2194)">6</text></g><g clip-path="url(#clip116)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 850.267 2274)">8</text></g><g clip-path="url(#clip117)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1035.88 2274)">4</text></g><g clip-path="url(#clip118)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1221.49 2274)">2</text></g><g clip-path="url(#clip119)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1407.11 2274)">1</text></g><g clip-path="url(#clip120)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1592.72 2274)">8</text></g><g clip-path="url(#clip121)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1778.33 2274)">4</text></g><g clip-path="url(#clip122)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1963.95 2274)">2</text></g><g clip-path="url(#clip123)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2149.56 2274)">1</text></g><g clip-path="url(#clip124)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2335.17 2274)">8</text></g><g clip-path="url(#clip125)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2520.79 2274)">4</text></g><g clip-path="url(#clip126)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2706.4 2274)">2</text></g><g clip-path="url(#clip127)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2892.01 2274)">1</text></g><g clip-path="url(#clip128)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3077.63 2274)">8</text></g><g clip-path="url(#clip129)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3263.24 2274)">4</text></g><g clip-path="url(#clip130)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3448.85 2274)">2</text></g><g clip-path="url(#clip131)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3634.47 2274)">1</text></g><g clip-path="url(#clip132)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" 
transform="matrix(1 0 0 1 3820.08 2274)">8</text></g><g clip-path="url(#clip133)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4005.69 2274)">4</text></g><g clip-path="url(#clip134)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4191.31 2274)">2</text></g><g clip-path="url(#clip135)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4376.92 2274)">1</text></g><g clip-path="url(#clip136)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 850.267 2354)">1</text></g><g clip-path="url(#clip137)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1035.88 2354)">2</text></g><g clip-path="url(#clip138)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1221.49 2354)">4</text></g><g clip-path="url(#clip139)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1407.11 2354)">8</text></g><g clip-path="url(#clip140)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1592.72 2354)">1</text></g><g clip-path="url(#clip141)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1778.33 2354)">2</text></g><g clip-path="url(#clip142)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1963.95 2354)">4</text></g><g clip-path="url(#clip143)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2149.56 2354)">8</text></g><g clip-path="url(#clip144)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2335.17 2354)">1</text></g><g clip-path="url(#clip145)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2520.79 2354)">2</text></g><g clip-path="url(#clip146)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2706.4 2354)">4</text></g><g clip-path="url(#clip147)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2892.01 2354)">8</text></g><g clip-path="url(#clip148)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3077.63 2354)">1</text></g><g clip-path="url(#clip149)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3263.24 2354)">2</text></g><g clip-path="url(#clip150)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3448.85 2354)">4</text></g><g clip-path="url(#clip151)"><text fill="#595959" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3634.47 2354)">8</text></g><g clip-path="url(#clip152)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3820.08 2354)">1</text></g><g clip-path="url(#clip153)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4005.69 2354)">2</text></g><g clip-path="url(#clip154)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4191.31 2354)">4</text></g><g clip-path="url(#clip155)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4376.92 2354)">8</text></g><g clip-path="url(#clip156)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1118.24 2434)">20</text></g><g clip-path="url(#clip157)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1860.69 2434)">32</text></g><g clip-path="url(#clip158)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2592.69 2434)">128</text></g><g clip-path="url(#clip159)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3335.15 2434)">384</text></g><g clip-path="url(#clip160)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4077.6 2434)">512</text></g><g clip-path="url(#clip161)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(6.12323e-17 -1 1 6.12323e-17 620.354 1598)">Max Latency over instances (ms)</text></g><g clip-path="url(#clip162)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2360.1 2499)">Numer of cores per instance</text></g><g clip-path="url(#clip163)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2411.5 2555)">Batch size per instance</text></g><g clip-path="url(#clip164)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2417.14 2611)">Number of instance(s)</text></g><g clip-path="url(#clip165)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2469.12 2666)">Sequence length</text></g><g clip-path="url(#clip166)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 1795.18 388)">Max Latency over multi</text></g><g clip-path="url(#clip167)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 2403.39 388)">-</text></g><g clip-path="url(#clip168)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 2423.02 388)">instances workloads 
for total batch size = 8</text></g><g clip-path="url(#clip169)"><rect x="4552" y="1524" width="23" height="23" fill="#C00000"/></g><g clip-path="url(#clip170)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4584.5 1547)">pytorch</text></g><g clip-path="url(#clip171)"><rect x="4552" y="1601" width="23" height="23" fill="url(#fill172)"/></g><g clip-path="url(#clip173)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4584.5 1624)">tensorflow</text></g><rect x="519.5" y="292.5" width="4293" height="2444" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg>
1
0
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/core_count_scaling.svg
<svg width="4708" height="2261" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="13" y="-1" width="4708" height="2261"/></clipPath><clipPath id="clip1"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip2"><rect x="332" y="169" width="4058" height="1650"/></clipPath><clipPath id="clip3"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="478" y1="1730" x2="435" y2="1730" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill4"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip5"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="671" y1="1623.5" x2="628" y2="1623.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill6"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip7"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="864" y1="1667.5" x2="821" y2="1667.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill8"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip9"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="1057" y1="1731" x2="1014" y2="1731" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill10"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip11"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="1250" y1="1736.5" x2="1207" y2="1736.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill12"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip13"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="1443" y1="1730.5" x2="1400" y2="1730.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill14"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip15"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="1636" y1="1732" x2="1593" y2="1732" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill16"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip17"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="1829" y1="1649" x2="1786" y2="1649" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill18"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip19"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="2023" y1="1483" x2="1979" y2="1483" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill20"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip21"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient 
x1="2216" y1="1546" x2="2172" y2="1546" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill22"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip23"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="2409" y1="1599" x2="2365" y2="1599" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill24"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip25"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="2602" y1="1661.5" x2="2558" y2="1661.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill26"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip27"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="2795" y1="1687" x2="2751" y2="1687" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill28"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip29"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="2988" y1="1671" x2="2945" y2="1671" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill30"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip31"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="3181" y1="1209.5" x2="3138" y2="1209.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill32"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip33"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="3374" y1="1157.5" x2="3331" y2="1157.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill34"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip35"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="3567" y1="1318.5" x2="3524" y2="1318.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill36"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip37"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="3760" y1="1409" x2="3717" y2="1409" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill38"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip39"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="3953" y1="1497.5" x2="3910" y2="1497.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill40"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip41"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="4146" y1="1509.5" x2="4103" y2="1509.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" 
id="fill42"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip43"><rect x="332" y="169" width="4058" height="1650"/></clipPath><linearGradient x1="4339" y1="1529.5" x2="4296" y2="1529.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill44"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip45"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip46"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip47"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip48"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip49"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip50"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip51"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip52"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip53"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip54"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip55"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip56"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip57"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip58"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip59"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip60"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip61"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip62"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip63"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip64"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip65"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip66"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip67"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip68"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip69"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip70"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip71"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip72"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip73"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip74"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip75"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip76"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip77"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip78"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip79"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip80"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip81"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip82"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath 
id="clip83"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip84"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip85"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip86"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip87"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip88"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip89"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip90"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip91"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip92"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip93"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip94"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip95"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip96"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip97"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip98"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip99"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip100"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip101"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip102"><rect x="13" y="0" width="4706" height="2257"/></clipPath><linearGradient x1="129" y1="2103.5" x2="106" y2="2103.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill103"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip104"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip105"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip106"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip107"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip108"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip109"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip110"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip111"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip112"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip113"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip114"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip115"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip116"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip117"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip118"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip119"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip120"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip121"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip122"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip123"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip124"><rect x="13" y="0" width="4706" 
height="2257"/></clipPath><clipPath id="clip125"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip126"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip127"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip128"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip129"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip130"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip131"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip132"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip133"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip134"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip135"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip136"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip137"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip138"><rect x="13" y="0" width="4706" height="2257"/></clipPath><clipPath id="clip139"><rect x="13" y="0" width="4706" height="2257"/></clipPath><linearGradient x1="4481" y1="1227.5" x2="4459" y2="1227.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill140"><stop offset="0" stop-color="#FF7101"/><stop offset="0.98" stop-color="#FF8F00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip141"><rect x="13" y="0" width="4706" height="2257"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-13 1)"><rect x="14.0001" y="0" width="4705" height="2258" fill="#FFFFFF"/><g clip-path="url(#clip1)"><path d="M332.5 1541.55 4386.5 1541.55M332.5 1267.54 4386.5 1267.54M332.5 992.533 4386.5 992.533M332.5 718.524 4386.5 718.524M332.5 444.515 4386.5 444.515M332.5 169.5 4386.5 169.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip2)"><path d="M380.012 1701.06 423.014 1701.06 423.014 1816 380.012 1816ZM573.019 1733.06 616.02 1733.06 616.02 1816 573.019 1816ZM766.025 1766.06 809.027 1766.06 809.027 1816 766.025 1816ZM959.031 1783.06 1002.03 1783.06 1002.03 1816 959.031 1816ZM1152.04 1790.06 1195.04 1790.06 1195.04 1816 1152.04 1816ZM1345.04 1791.06 1388.05 1791.06 1388.05 1816 1345.04 1816ZM1538.05 1789.06 1581.05 1789.06 1581.05 1816 1538.05 1816ZM1731.06 1576.05 1774.06 1576.05 1774.06 1816 1731.06 1816ZM1924.06 1665.05 1967.06 1665.05 1967.06 1816 1924.06 1816ZM2117.07 1722.06 2161.07 1722.06 2161.07 1816 2117.07 1816ZM2310.08 1758.06 2354.08 1758.06 2354.08 1816 2310.08 1816ZM2503.08 1772.06 2547.08 1772.06 2547.08 1816 2503.08 1816ZM2696.09 1777.06 2740.09 1777.06 2740.09 1816 2696.09 1816ZM2889.09 1771.06 2933.1 1771.06 2933.1 1816 2889.09 1816ZM3083.1 757.025 3126.1 757.025 3126.1 1816 3083.1 1816ZM3276.11 1228.04 3319.11 1228.04 3319.11 1816 3276.11 1816ZM3469.11 1503.05 3512.12 1503.05 3512.12 1816 3469.11 1816ZM3662.12 1615.05 3705.12 1615.05 3705.12 1816 3662.12 1816ZM3855.13 1690.06 3898.13 1690.06 3898.13 1816 3855.13 1816ZM4048.13 1669.05 4091.13 1669.05 4091.13 1816 4048.13 1816ZM4241.14 1725.06 4284.14 1725.06 4284.14 1816 4241.14 1816Z" fill="#C00000"/></g><g clip-path="url(#clip3)"><rect x="435" y="1644" width="43.0003" height="172" fill="url(#fill4)"/></g><g clip-path="url(#clip5)"><rect x="628" y="1431" width="42.9999" height="385" fill="url(#fill6)"/></g><g clip-path="url(#clip7)"><rect 
x="821" y="1519" width="42.9999" height="297" fill="url(#fill8)"/></g><g clip-path="url(#clip9)"><rect x="1014" y="1646" width="42.9999" height="170" fill="url(#fill10)"/></g><g clip-path="url(#clip11)"><rect x="1207" y="1657" width="43" height="159" fill="url(#fill12)"/></g><g clip-path="url(#clip13)"><rect x="1400" y="1645" width="43" height="171" fill="url(#fill14)"/></g><g clip-path="url(#clip15)"><rect x="1593" y="1648" width="43" height="168" fill="url(#fill16)"/></g><g clip-path="url(#clip17)"><rect x="1786" y="1482" width="43" height="334" fill="url(#fill18)"/></g><g clip-path="url(#clip19)"><rect x="1979" y="1150" width="44" height="666" fill="url(#fill20)"/></g><g clip-path="url(#clip21)"><rect x="2172" y="1276" width="44" height="540" fill="url(#fill22)"/></g><g clip-path="url(#clip23)"><rect x="2365" y="1382" width="44" height="434" fill="url(#fill24)"/></g><g clip-path="url(#clip25)"><rect x="2558" y="1507" width="44" height="309" fill="url(#fill26)"/></g><g clip-path="url(#clip27)"><rect x="2751" y="1558" width="44" height="258" fill="url(#fill28)"/></g><g clip-path="url(#clip29)"><rect x="2945" y="1526" width="43" height="290" fill="url(#fill30)"/></g><g clip-path="url(#clip31)"><rect x="3138" y="603" width="43" height="1213" fill="url(#fill32)"/></g><g clip-path="url(#clip33)"><rect x="3331" y="499" width="43" height="1317" fill="url(#fill34)"/></g><g clip-path="url(#clip35)"><rect x="3524" y="821" width="43" height="995" fill="url(#fill36)"/></g><g clip-path="url(#clip37)"><rect x="3717" y="1002" width="43" height="814" fill="url(#fill38)"/></g><g clip-path="url(#clip39)"><rect x="3910" y="1179" width="43" height="637" fill="url(#fill40)"/></g><g clip-path="url(#clip41)"><rect x="4103" y="1203" width="43" height="613" fill="url(#fill42)"/></g><g clip-path="url(#clip43)"><rect x="4296" y="1243" width="43.0005" height="573" fill="url(#fill44)"/></g><g clip-path="url(#clip45)"><path d="M332.5 1815.5 4386.5 1815.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip46)"><path d="M332.5 1815.5 4386.5 1815.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip47)"><path d="M332.5 1815.5 4386.5 1815.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip48)"><path d="M332.5 1815.5 4386.5 1815.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip49)"><path d="M332.5 1815.5 332.5 1879.5M332.5 1815.5 332.5 1879.5M525.517 1815.5 525.517 1879.5M525.517 1815.5 525.517 1879.5M718.524 1815.5 718.524 1879.5M718.524 1815.5 718.524 1879.5M911.53 1815.5 911.53 1879.5M911.53 1815.5 911.53 1879.5M1104.54 1815.5 1104.54 1879.5M1104.54 1815.5 1104.54 1879.5M1297.54 1815.5 1297.54 1879.5M1297.54 1815.5 1297.54 1879.5M1490.55 1815.5 1490.55 1879.5M1490.55 1815.5 1490.55 1879.5M1683.56 1815.5 1683.56 1879.5M1683.56 1815.5 1683.56 1879.5M1876.56 1815.5 1876.56 1879.5M1876.56 1815.5 1876.56 1879.5M2069.57 1815.5 2069.57 1879.5M2069.57 1815.5 2069.57 1879.5M2262.57 1815.5 2262.57 1879.5M2262.57 1815.5 2262.57 1879.5M2455.58 1815.5 2455.58 1879.5M2455.58 1815.5 2455.58 1879.5M2649.59 1815.5 2649.59 1879.5M2649.59 1815.5 2649.59 1879.5M2842.59 1815.5 2842.59 1879.5M2842.59 1815.5 2842.59 1879.5M3035.6 1815.5 3035.6 1879.5M3035.6 
1815.5 3035.6 1879.5M3228.61 1815.5 3228.61 1879.5M3228.61 1815.5 3228.61 1879.5M3421.61 1815.5 3421.61 1879.5M3421.61 1815.5 3421.61 1879.5M3614.62 1815.5 3614.62 1879.5M3614.62 1815.5 3614.62 1879.5M3807.63 1815.5 3807.63 1879.5M3807.63 1815.5 3807.63 1879.5M4000.63 1815.5 4000.63 1879.5M4000.63 1815.5 4000.63 1879.5M4193.64 1815.5 4193.64 1879.5M4193.64 1815.5 4193.64 1879.5M4386.5 1815.5 4386.5 1879.5M4386.5 1815.5 4386.5 1879.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip50)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 418.405 1859)">1</text></g><g clip-path="url(#clip51)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 611.469 1859)">2</text></g><g clip-path="url(#clip52)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 804.534 1859)">4</text></g><g clip-path="url(#clip53)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 997.599 1859)">8</text></g><g clip-path="url(#clip54)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1180.21 1859)">16</text></g><g clip-path="url(#clip55)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1373.28 1859)">24</text></g><g clip-path="url(#clip56)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1566.34 1859)">48</text></g><g clip-path="url(#clip57)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1769.86 1859)">1</text></g><g clip-path="url(#clip58)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1962.92 1859)">2</text></g><g clip-path="url(#clip59)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2155.99 1859)">4</text></g><g clip-path="url(#clip60)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2349.05 1859)">8</text></g><g clip-path="url(#clip61)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2531.66 1859)">16</text></g><g clip-path="url(#clip62)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2724.73 1859)">24</text></g><g clip-path="url(#clip63)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2917.79 1859)">48</text></g><g clip-path="url(#clip64)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3121.31 1859)">1</text></g><g clip-path="url(#clip65)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 
0 0 1 3314.37 1859)">2</text></g><g clip-path="url(#clip66)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3507.44 1859)">4</text></g><g clip-path="url(#clip67)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3700.5 1859)">8</text></g><g clip-path="url(#clip68)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3883.12 1859)">16</text></g><g clip-path="url(#clip69)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4076.18 1859)">24</text></g><g clip-path="url(#clip70)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4269.24 1859)">48</text></g><g clip-path="url(#clip71)"><path d="M332.5 1879.5 332.5 1942.5M1683.56 1879.5 1683.56 1942.5M3035.6 1879.5 3035.6 1942.5M4386.5 1879.5 4386.5 1942.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip72)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 987.149 1922)">32</text></g><g clip-path="url(#clip73)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2328.15 1922)">128</text></g><g clip-path="url(#clip74)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3679.6 1922)">512</text></g><g clip-path="url(#clip75)"><path d="M332.5 1942.5 332.5 2006.5M4386.5 1942.5 4386.5 2006.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip76)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2349.05 1986)">1</text></g><g clip-path="url(#clip77)"><path d="M91.4999 2006.5 4386.5 2006.5M91.4999 2006.5 91.4999 2071.5M332.511 2006.5 332.511 2071.5M332.511 2006.5 332.511 2071.5M525.517 2006.5 525.517 2071.5M525.517 2006.5 525.517 2071.5M718.524 2006.5 718.524 2071.5M718.524 2006.5 718.524 2071.5M911.53 2006.5 911.53 2071.5M911.53 2006.5 911.53 2071.5M1104.54 2006.5 1104.54 2071.5M1104.54 2006.5 1104.54 2071.5M1297.54 2006.5 1297.54 2071.5M1297.54 2006.5 1297.54 2071.5M1490.55 2006.5 1490.55 2071.5M1490.55 2006.5 1490.55 2071.5M1683.56 2006.5 1683.56 2071.5M1683.56 2006.5 1683.56 2071.5M1876.56 2006.5 1876.56 2071.5M1876.56 2006.5 1876.56 2071.5M2069.57 2006.5 2069.57 2071.5M2069.57 2006.5 2069.57 2071.5M2262.57 2006.5 2262.57 2071.5M2262.57 2006.5 2262.57 2071.5M2455.58 2006.5 2455.58 2071.5M2455.58 2006.5 2455.58 2071.5M2649.59 2006.5 2649.59 2071.5M2649.59 2006.5 2649.59 2071.5M2842.59 2006.5 2842.59 2071.5M2842.59 2006.5 2842.59 2071.5M3035.6 2006.5 3035.6 2071.5M3035.6 2006.5 3035.6 2071.5M3228.61 2006.5 3228.61 2071.5M3228.61 2006.5 3228.61 2071.5M3421.61 2006.5 3421.61 2071.5M3421.61 2006.5 3421.61 2071.5M3614.62 2006.5 3614.62 2071.5M3614.62 2006.5 3614.62 2071.5M3807.63 2006.5 3807.63 2071.5M3807.63 2006.5 3807.63 2071.5M4000.63 2006.5 4000.63 2071.5M4000.63 2006.5 4000.63 2071.5M4193.64 2006.5 4193.64 2071.5M4193.64 2006.5 4193.64 2071.5M4386.5 
2006.5 4386.5 2071.5M4386.5 2006.5 4386.5 2071.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip78)"><rect x="106" y="2026" width="23" height="23.0001" fill="#C00000"/></g><g clip-path="url(#clip79)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 138.699 2049)">pytorch</text></g><g clip-path="url(#clip80)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 407.955 2050)">84</text></g><g clip-path="url(#clip81)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 601.019 2050)">60</text></g><g clip-path="url(#clip82)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 794.084 2050)">36</text></g><g clip-path="url(#clip83)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 987.149 2050)">24</text></g><g clip-path="url(#clip84)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1180.21 2050)">19</text></g><g clip-path="url(#clip85)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1373.28 2050)">18</text></g><g clip-path="url(#clip86)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1566.34 2050)">20</text></g><g clip-path="url(#clip87)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1748.96 2050)">175</text></g><g clip-path="url(#clip88)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1942.02 2050)">110</text></g><g clip-path="url(#clip89)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2145.54 2050)">69</text></g><g clip-path="url(#clip90)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2338.6 2050)">42</text></g><g clip-path="url(#clip91)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2531.66 2050)">32</text></g><g clip-path="url(#clip92)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2724.73 2050)">28</text></g><g clip-path="url(#clip93)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2917.79 2050)">33</text></g><g clip-path="url(#clip94)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3100.41 2050)">772</text></g><g clip-path="url(#clip95)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3293.47 2050)">428</text></g><g clip-path="url(#clip96)"><text 
fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3486.54 2050)">228</text></g><g clip-path="url(#clip97)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3679.6 2050)">146</text></g><g clip-path="url(#clip98)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3883.12 2050)">92</text></g><g clip-path="url(#clip99)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4065.73 2050)">107</text></g><g clip-path="url(#clip100)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4269.24 2050)">66</text></g><g clip-path="url(#clip101)"><path d="M91.4999 2071.5 4386.5 2071.5M91.4999 2071.5 91.4999 2137.5M91.4999 2137.5 4386.5 2137.5M332.511 2071.5 332.511 2137.5M332.511 2071.5 332.511 2137.5M525.517 2071.5 525.517 2137.5M525.517 2071.5 525.517 2137.5M718.524 2071.5 718.524 2137.5M718.524 2071.5 718.524 2137.5M911.53 2071.5 911.53 2137.5M911.53 2071.5 911.53 2137.5M1104.54 2071.5 1104.54 2137.5M1104.54 2071.5 1104.54 2137.5M1297.54 2071.5 1297.54 2137.5M1297.54 2071.5 1297.54 2137.5M1490.55 2071.5 1490.55 2137.5M1490.55 2071.5 1490.55 2137.5M1683.56 2071.5 1683.56 2137.5M1683.56 2071.5 1683.56 2137.5M1876.56 2071.5 1876.56 2137.5M1876.56 2071.5 1876.56 2137.5M2069.57 2071.5 2069.57 2137.5M2069.57 2071.5 2069.57 2137.5M2262.57 2071.5 2262.57 2137.5M2262.57 2071.5 2262.57 2137.5M2455.58 2071.5 2455.58 2137.5M2455.58 2071.5 2455.58 2137.5M2649.59 2071.5 2649.59 2137.5M2649.59 2071.5 2649.59 2137.5M2842.59 2071.5 2842.59 2137.5M2842.59 2071.5 2842.59 2137.5M3035.6 2071.5 3035.6 2137.5M3035.6 2071.5 3035.6 2137.5M3228.61 2071.5 3228.61 2137.5M3228.61 2071.5 3228.61 2137.5M3421.61 2071.5 3421.61 2137.5M3421.61 2071.5 3421.61 2137.5M3614.62 2071.5 3614.62 2137.5M3614.62 2071.5 3614.62 2137.5M3807.63 2071.5 3807.63 2137.5M3807.63 2071.5 3807.63 2137.5M4000.63 2071.5 4000.63 2137.5M4000.63 2071.5 4000.63 2137.5M4193.64 2071.5 4193.64 2137.5M4193.64 2071.5 4193.64 2137.5M4386.5 2071.5 4386.5 2137.5M4386.5 2071.5 4386.5 2137.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip102)"><rect x="106" y="2092" width="23" height="23" fill="url(#fill103)"/></g><g clip-path="url(#clip104)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 138.699 2115)">tensorflow</text></g><g clip-path="url(#clip105)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 397.505 2116)">125</text></g><g clip-path="url(#clip106)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 590.569 2116)">280</text></g><g clip-path="url(#clip107)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 783.634 2116)">217</text></g><g clip-path="url(#clip108)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 976.699 2116)">123</text></g><g 
clip-path="url(#clip109)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1169.76 2116)">115</text></g><g clip-path="url(#clip110)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1362.83 2116)">124</text></g><g clip-path="url(#clip111)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1555.89 2116)">122</text></g><g clip-path="url(#clip112)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1748.96 2116)">243</text></g><g clip-path="url(#clip113)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1942.02 2116)">486</text></g><g clip-path="url(#clip114)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2135.09 2116)">393</text></g><g clip-path="url(#clip115)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2328.15 2116)">317</text></g><g clip-path="url(#clip116)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2521.21 2116)">225</text></g><g clip-path="url(#clip117)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2714.28 2116)">188</text></g><g clip-path="url(#clip118)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2907.34 2116)">211</text></g><g clip-path="url(#clip119)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3100.41 2116)">884</text></g><g clip-path="url(#clip120)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3293.47 2116)">960</text></g><g clip-path="url(#clip121)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3486.54 2116)">726</text></g><g clip-path="url(#clip122)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3679.6 2116)">593</text></g><g clip-path="url(#clip123)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3872.67 2116)">464</text></g><g clip-path="url(#clip124)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4065.73 2116)">447</text></g><g clip-path="url(#clip125)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4258.79 2116)">418</text></g><g clip-path="url(#clip126)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 273.156 1827)">0</text></g><g clip-path="url(#clip127)"><text fill="#595959" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 231.356 1553)">200</text></g><g clip-path="url(#clip128)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 231.356 1279)">400</text></g><g clip-path="url(#clip129)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 231.356 1004)">600</text></g><g clip-path="url(#clip130)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 231.356 730)">800</text></g><g clip-path="url(#clip131)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 210.456 456)">1000</text></g><g clip-path="url(#clip132)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 210.456 182)">1200</text></g><g clip-path="url(#clip133)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(6.12323e-17 -1 1 6.12323e-17 115.042 1111)">Latency (ms)</text></g><g clip-path="url(#clip134)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 1773.91 2189)">Sequence Length / Number of thread for computations kernels</text></g><g clip-path="url(#clip135)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 690.009 95)">Impact of the number of threads involved in the computation kernels with respect to the problem size (batch &amp; sequence length</text></g><g clip-path="url(#clip136)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 4023.28 95)">)</text></g><g clip-path="url(#clip137)"><rect x="4459" y="1139" width="22" height="22" fill="#C00000"/></g><g clip-path="url(#clip138)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4491 1162)">pytorch</text></g><g clip-path="url(#clip139)"><rect x="4459" y="1216" width="22" height="23" fill="url(#fill140)"/></g><g clip-path="url(#clip141)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4491 1239)">tensorflow</text></g><rect x="14.4999" y="0.499836" width="4705" height="2258" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg>
2
0
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/numa_combined.svg
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 2000 788" width="2000" height="788"> <defs> <image width="2000" height="788" id="img1" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAB9AAAAMUCAMAAADT0Ez6AAAAAXNSR0IB2cksfwAAAapQTFRFAAAADAwME6EOEoIOxQ8fmg8bbW1t8vLyzc3N39/fsbGxEyIuJlmBMn+6OpbdIUlpp6enDS0MDB4MEGINEpIOEooOMQ0Qhg4YsA4cpg4cXg4VNzc3bGxsdnZ2REREHT9ZLnOoOJDUMXmxEXIOD1MND0INUw0TkA4ZPg0RKmaVNovMNYXDGxsbYmJiJycnWFhYaGhoGTJGPT09Xl5eLy8vuw8eaA0VcXFxT09PLG2eDjgMcQ0WSUlJKGCKEXoNEFwNew4YIQ0PU1NTD0sNE5oOSA0SEGoNI1F1ADfaAy6uwZwAm34CHjg4SJycWMHBYdbWVLe3OXZ2FiQkQYqKDSUMEWoMFKEMFsYMEFQMEn0MFbQMFawMD0gMFJAMFr4MFJkMXcvLUK2tPYCAEoYMRJKSM2hoEXQMLVtbTKSkDjkMJkpKEWAMjIyMfn5+1tbWxcXF6enpu7u7QUFBm5ubQEBA////gICAf39/v7+/wMDAPz8/j4+PkJCQYGBgUFBQICAgEBAQ7+/vz8/PcHBw8PDwn5+foKCgr6+vMDAwsLCwb29v0NDQX19f4ODgRgVOkQABMyVJREFUeJzsfQlb4szW7ZcQv3PuReKVwQmDINjKjAIGFbVV0P//j24NGaoyaFO1kxio9TynCRXf067eSa3Uzqq9/+d/FBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFELQFBQUfgvU/aigkDWyFmUZZP1vp6Cg4EHdjwoKWSNrUZZB1v92CgoKHtT9qKCQNbIWZRlk/W+noKDgQd2PCgpZI2tRlkHW/3YKCgoe1P2ooJA1shZlGWT9b6egoOBB3Y8KClkja1GWQdb/dgoKCh7U/aigkDWyFmUZSJPXC+LHecG2cjT2sv4NFHgoQVdQyBpZi7IMBOj+738w/ut8y0TsyG/wH4D/ozhsK0eeVyaCnnLsMoEwRyXoCgpZI2tRloEA3f/9z//573//+3+db5mIHfr7/5uwKGwnR55XJoKecuwygTBHJegKClkja1GWAc+kuF8yzYMf6P7vf/4f801U7HQPh5pWrlR1vVYJ/VVHxyeGcXqGD+vn1nmDPScqChfNlmleUpKILUYp9EMwHLVy2+dV7hR0/SriF/pzahjXN/hIimPx4ELTDiiXYrdnmv1u+Id4XiCCPmgMLaseHh+Nzy1rQuhAxc70cKtpt03McT/8UzxHGNxNLGs6+24chqMSdAWFrJG1KMuAI3JBJsu0BL2AUdVrSOtq6Bv6XzvwN53NDWMPSfo9kgfbXkw43RAUhX3TbJV6DsuS2S8hPIR+CoYjy+uRPLxECPoT5Xgjy7Frdl1BL/bRQwr6X/MHXhCCPrMwwoLesC1riCR9CRg7xAqjZ/ZRHNFjGY7jc+inEhD0BeUSUnR/HIijpKCzFyEb3G0yS2yroWUXkJPYZS3KMuCIHLSaB6XUBJ18vOgdTXvVa2VNu9L1R/5vOjP+vmHFO9W0pdXQ6tY7c1JQFJpEAprmJf4SSxaGY0evIl4fJAlxtWpfFSIE/cgw/mjaMZ5y5ThemvuuoD+b/SI+Ni++5wUxz9ftcX0YJejW+wAr3gQwdqZJPgjTffOhiJ9i+qGfghf0kWXdadrYGsaPA3HcWNDjzR6pCboytAgiz2aPf0UuY5e1KMuAI4JmyHiN8wAp6GVdR5JXwKquaW09mHQ/wn+sjTlWhjs0YY7RzOkuk0SvQ0LvwGzhj4QFvaJ/4o8q1nFEU4sS9HvjGv35NkdLdDmOfdNLuZdMkolumqGkO7ygI9XWogRdG+E/6pYNGDsq6EXTxBfqLT524sgBXtCX1hT9ObCDS3RmHIijgKDHmT1SE3RlaBFEns0e/4pcxi5rUZZBiExagl4gnx39RfOErhISdII/WPJm1qQ+thqjIZ40CaSuwwOqDYjs7cFtxHkYjlfkoKyvyvR7lKBf4xcKmraHPuQ4EkauoJMYdtMQdIxIQSe4w5IHFrsSMQjs0+wKwRdz7AJe0Kf4xQGmuYwdB+LI3o+ShpaNBJ01QgwW2PywCP6I5WGUU0MLa9T5qOn6SyAhyBt71ifGyRl7MmFDCwzSMnuIGVpgzEgxhqsCjRz+T6RipwRd+NjJuKNQfNAvUYJ+M59jx9gSzSTjO9ufU6UE/Yvea3QOKYUy01AcP/VCpVJddZyvUYK+Z6zf7v9oT8aTHEc6e7iC/oWHLjMX9Jlt40kENnaXpjdxXLRaP8UOAojfYHmHVuGL+HEYjsz9KGto2UTQWSPEAE39Q/S/ceBn0CjGuTXJv6EFrR30QjX0io819hzN50+nxpo5mbChBQSpmT3EDC0QsYszXKHZtYaD96lJxk4JuvCxdkgy7lqbvEPvrMKCfnR/bVyTxLs2qs+m1sS/VKVEoUSvvZJZ6qI1QqsYPA/F8QrNGoVD91uMoN8bxg0RdBmOzyaeCekc0iTv0NFdlqmgj5ZTazqih4Cxu6UZd3TwfGleRqRXEhF0pNezKEH3x0E4MvejrKFlE0FnjRBLazLA70qCqmBZ5GNqNfJpaGGNOmje+cDTTjCjyxh77o0zbW38ZU4mbGiBQHpmDzFDC4QZKc5wxcyucrFTgi58jIKDM+7a4UrXa6ta+B26dmYYxskTPa6fW++z+sA9JSMKB46GX2A9KPbCT5ZgK/Ra5fPHFfqTJ+gSHJtk3UrnkFv0wNxv9VN5h44RLegNtOg7dyQQMHbPbpYdLe/MXiqZTMRvESPozDgER+Z+lDW0bCLorBFiaJGE7DhIlgr6wLIG+TS0sEYdOu+UV0FRYIw9T8YfJArHSB1unJMJG1ogkJ7ZQ8zQAmFGijVc+bOrXOyUoAsfazUaHO3xZVVtl2tRu7re/pzSZ613y65PkUa4738kROGiRXPSDrrhF7FQ1yGZN6our9iU+xlNuctw7JO4Oa/tLi5bvWaxHw5luin3wd2ErgUAY4eIehn34lff/ClDCwGSWm/EpNzdcRCOgfsxLUHH8AWdfIbJkmVfA9QUQZGOoYU16rzor3Tolf+bGGPPjXG6PjbOjvawMBAkbGiBQHpmDzFDC4QZKdZwhU4cXpFsqFzslKALHzsZdwdX+iry73szjCN8MU4HS2tcn1g0iysxhyA95+wbX2EjDvwcwnznwJjipDjSV1p9s+Vx+fmBOXlT3ABbqCBj52fcCYrEksM
jk3kSiOOvEPQ7/DENCjrFlDyw5NHQwhp10I1Yfv2Ic+FSY8+9YRjHf+b09sRI2NACgfTMHi42M7RAmJFiDVfUFFfASReZ2GlK0IWPnYw7BVqgR95emnaC/Q2NBr4ucbrPWQIJX4dd070InZvsIbzMS0/Q6bY1DW9bk+F44ZtOnRG0QA/no9MWdO0cn4KLHZNxp+j9lIWAAM1YajGZTDoOw/EXCPqYvENv2JGCPsIZdy2XhhbWqINuxFddf4wWdHeZcbS+uTZOb7zxhA0tEEjP7OFgQ0MLhBkp1nBVQKLfXtEtRRKx05SgCx+7GXcNF0Zd4ZpxBI+rGl24v73hP9Hj1hs9MUWrnwVdQAhfhxd9s+8+SD6bvdtQAp4Aag55KWM6Lk1G0D2OtLDMX+PEOSHH8cBNNhTRDBK2qCQv6DN7Qt8iD8jH0hEAmNhhuBn3IplInrn1OkVSXqN365x+9TgGxgE4/gJBH9mWNbEnoXfoBM4jTB4NLaxRB92IlVhB95YZ6xPj7836zR1P2NACgfTMHg42NLRAmJFiDVePWO/LVfoaRTx22hYJ+kWpVGrhzSPh1AkDMEH3M+6f6PlL/3TT7xW3ROq9cbq3NzcMd0vhnTVZ2LZzJQpeh2h93iP7LfadHSWln3eUSMwhq0JBJ08qj4VCYYU3VjzyHN3Sr+4mCzmOjqA/9ND99RCSukRKvw6HQ9uaDIf0kX9h+WZpfMJ7pQwRO41ZEjzj67RlmuG9r8ntBqp73+pR4wAcf4Gga7OpfT4eTCKzLhMnnjk0tLBGHZJy78Sk3N1lxl9jvr42/NknYUMLBNIzezjY0NACYkb6xnDl/ahE7LQtEvQDN2v77TQCJuh+xv1lVah4m7v81eufPRST+bW/oXBp+7kicUGnwI+T/1jzQZjjIa2HgNlcuSUr6HXocdS0M9ycBYijM4dctkrdKHtREqVf3XIjdP73Vq93QzRmT31VAIidxiwJvvAr19ZlxJWaRL2OBq7L4XLxOAbGATj+BkEnICX+QnAz7nk0tFBQo06cKQ7DXWbsGddv98bx+tSg22aTNrRAID2zB8Wmhhaod5fxhivtA/83MrHTtkjQ/w25rOa3IbaVo2qfmgqg2qdmJuhogf5dxj2PhhYCx6jjLCVC29Y0/5x2doaF4U17cpd5CRtaIJCe2YNiU0MLmKBHGK4cl9wn3uEmETuMrEVZBpvwdJDLevsbYls58rwyEfSUY5cJhDn+DkEfNGzcVIeASUf4GXeMvBlaNMaoQwvLfOpVOs6kyzhjj3aNVnh4UzNBwoYWCKRn9qDY1NACZEaKNFy96tVD8jMf9IRE7LIWZRlswjMSmYhdythWjtvUVXM7wJZ+lTS0bFT6lTVCvJ9bFmmVR7BgeuOOPIujlj9DC2fUcUq/Oq9hK0xfY24r7R/j9Gk+f6NfEja0gCA1swfBxoYWCDNSnOGKlIQt+CVhJWKXtSjLYBOeCgoKiYIt/SppaNmo9CtrhJjaw8XIO8Wu0D2PO/2WM0MLZ9Tp4OYsroqzK3RuK612P/c3PyVsaIFBWmYPgo0NLRBmpDjDVahpi3jsshZlGWzCU0FBIVGo9qkR2A1DSyZQsYtC1qIsA4B/MAUFBRgICHqc2SM1QVeGFkHk2ezxr8hl7LIWZRkA/IMpKCjAQPJ+zETQU8a2Glp2ATmJXdaiLANp8jmJkRR2gaPCb4B6wFZQyBpZi7IMBOjmMouyIbaV4+5l+TIB1LY1BQWF1JG1KMtAgG4ufQ4bYls57p4PJxNAFZZRUFBIHVmLsgx4JhfNlmlG7T5gAbMTQfdw6O5K8As6uBgszi1r4nYUOG+w50RFIcDx4qH0YxEnGUF//CzQEkZv6xtNW4ffZg4aQ3eDqBRH2n3SqWXx1UccIzYwJ7FT5g7viJl9Nw4VO78A162mFQ8umNodDPLMUQm6gkLWyFqUZcAR2TfNVqn3w85XMEEvYFRxsYDOitZ5CFZWHqDpcoj+N8Y9oOwF3y9CUBR4jkUsg/2k6iFoWhk36aVPKk/GU5Sgz/xC6HIcS7i4Q6lECmd2kbAjjj+1JwaBU7MipHb+OFjsaIlsxAyX3eri57CUBD01jgmZ4rYJ22poycvvKYOcxC5rUZYBR6RJWhc2w/2OOEAJOvl4wdX8OqTwT8Ur5+diSVoz1/GMubQa6OCdOSkoChzHYt/s7UfUcIIS9HJNr3acugfXuK5wWNDr9rjuVN2U48iUCb01cf3MZipiR6tKjq1h/DhY7JzS2JekIhX5Mx1BT49jQtvWEoUytAginldqULGLQtaiLAOeCZGEqMZALCAFvUwLLZLCP+EGOkNaOhq3ZsaVh+t4qb6ULXDEckSaFyXnYILe1gteCapTIzLljks5OYIux5ERdFrBqdgKL9GT6gcxiOkHQcbBYkcFvUgLTvbN1FLu6XFMqLBMolCGFkHE80oNKnZRyFqUZRBB58DrERQNoAY65LPDFFr84IouYnhCt9Bm1qQ+thqjIZ40CaSuQ8rx1mxF6zlUCWJ95deFNgwt8h265vGU44gE/faAlpi8JFkINPIc/KFMOjaCxa5E1HufZldIBNMR9PQ4svejpKFFVNAtD6PY8VwaWtjSoOVOQY9qqF2+esQrC/yfrE+MkzP2XMKGFhhBjzZ7JBE7EUOLeOzaTOwCJV5dQMVu6wQ9oiMxB8j2xC9+a6PH1SrYzHBIGwJNcSvHJboax3e2P6dKCTrl+Gw2Ly5bvWZY1WE4vurtx5dVtY1VnUr5t4Iux5HOIaSVB9L24vMXfcv8HS8IoN99sLwjD12x47Cxoxl3OnukI+jpcWTuR1lDi7igDzHOvX5r4fFcGlpI844abd7xSNy4EYJOGnQSUTiaz59OjTVzLmFDC4igx5g9koidiKEFInbsMQeo2G2doEcs7DgACrrX2ujw9UV/OdQCGJN36A2bTKSj+mzqdxSQFIWS8yb9odVDj8+9xLr+fa6q6NqrIpL3xl/tJ0GX4lgyS1203sE5ByToz2gGSUvQkZbNosTOH4eMndPi6Zn0zU5N0FPiyNyPsoYWcUEnH1OrETueS0PLq467eFzpuAf61ap9VYgSdLLAIKJwb5yhu/Uvcy5hQwuEoMeZPZKInYihRTR2HTKFfpAdUWwcOUDFbtsE/SA2D+0AUNC91kYd9MBcDWVRRrZlTezJ2HI3rr3P6l7rJxlRcDiWzN4BacH8Q2pa+LWCXr0iLXpfNe0Ye+J+EnQZjhc43V7sYS5oOummKOiLGLFjxgFj57R4ajLrdB555sjej5KGFjlBH7CdUoPjuTS0FGgysI3XcWXyPULQa7qbtsWttNfGMVIHkY5dAoYWCEGPM3skETsRQ4to7Cr6J/6o4pCxceQAFbstE3Qkb1/f0wUU9JqfcS9/1GjUWMym9vl4QDNE75Zdn1qWt3CQEAWXY8l0cmKhHsxQcwhhVMHpIZoC+kHQ5Tl28fRBUu77KabcGzHpaHccMHZo7tinH3jKTDHlngrH0P0obmgRFfQhWd41uE6pgfFcGlocAa+4QhAp6MSqS0Thxjhdo4fwoz0sDA
QJG1ogBD3O7JFE7EQMLaKxo2/Gy8SSFIyj/8NAsdsuQUdaF36JxQNO0L2MO0GZZFTCqFu2hi/S6WBpjesT16sjPod4HJ3n6AjTAOgc8oG/G8Yewqkxj5hnXUEH4Ei4/DJTHGTs3Iy78wqvb7Z+iB0E0uMYuh/FDS1yLvdQxp0bz6OhpaB/4I+X7wSdCgf9894wjOM/c+PePZmwoQVC0OPMHhRJxG4zQ4uwWHzqhUqlusLLv2AcXYDFbqsEvWv+qOeAgv7K+9qrUc/MuLoMvjwbDXy94pSRM9MIX4c+xya9yR7oQp0F1Dt0ctF9ooX6jeEh/Bu5gi7D8ZLh4mSlU9y2psVs6aLjcLHzuF34JtvgT+SZY0jQxQ0tUoI+CmXc+fFcGlrwu9fO6jtBfyUpNSoK2tH65to4vfFOJmxoARL0SLMHQRKx29DQIi4WV1VdL5D1XjCOLsBit0WCftE3+xHbKwKAE3Q3414m6/RXd71+Mz99c39k0LB9u+0UrX4W1PkufB2yHPfN1gVZsIecxFBeDmzcf1z5U4eXcp/ZE3/GHDKuU1GOz2bv1n2ZQH04D2Yv9FNJFV15t87pV49XYBwkdhh97okzzcIyaXAMCrqEoUVK0JehjHtgPH+GlsOVrtdWtfZ3gt4mE5IjCnjz09+b9Zt7MmFDC5CgR5o9CJKI3YaGFokVeq3ySVfowTi6AIvdFgk6uvB6ZC/Ct6t0MEH3Mu6veq1QQHFyXqg/Ge6Wg/dzy7LevUvvzposbNv5Kngdchz7OHFrhl+hQ3HEOywK7A4LT9AXbv322XA4RM8sw6HzyCzKschycXbKhHc8JVcWte59q0eNg8RO8zPuDtIt/Zo8x2BvBQlDi5SgT2Iy7u54Dg0tGt1BWnN1PErQ6UlHFP4a8/W1YRjuhuaEDS1gKfcIswdBArHb1NAiborDqdwyTeEG4ugCMHZZi7IMOCJdN4sZLu7AAEzQvYz7B654vnpxI+Sv0Kf2cMGWt1jafq5IXNB9jsUHJHv9iKQmVD2ET1wDgSlRH16h192SD64qiHIsdjEXJ3L7/eiaJEk0LmngWhbub89kHrhxkNhp3pLARVrNWdLiGMiYyRhaZAT9p4x7Hg0tFH45yihBpw0mavoK/Td7xvXbvXG8PjWO6MmEDS1JmuIwEojdxoYWSTMSY4QLlxUFjF3WoiyDTXg6yGU1vw2xrRzz3Fr0X5Fnjtz9KGdokRH0nzLueTS0EKCFnasJEYL+6LeA1LSzMywMb9qTu8xL2NACMWnEmT2Yc5kaWsAEnYmjA7DYaTso6Dmst78htpUjzysTpBy7TCDMkbkfZQ0tMoLOZNw5swebic+boUXDBV9XfgMoRtAfVzVmt433HhY3VDoim5oJEja0QEwacWYPDOjYYWxqaBE3NL6UiRnJ8VwxcQSPnbZzgs4jE7FLGbvAUeE3gLkfZQ0tEoLOZtx9w0AgE587Qwt+/UXaOiIVKGDPTq1QoNXGKnwZWF8U/hinT/P5G/2SsKEFZNKIM3skEDtNwNAiYWhcYTMSUXEmjloCsdN2XNAVFBSgwAu6lKFFQtDZjDu7yuMz8XkztLysChWn0MWVm52lWhC7ytPu5/7mp4QNLTCrgDizB3zsNAFDi3DsDmlzFhIlJo5aErFTgq6goAAC1T41ArthaMkkradiF4WsRVkGAP9gCgoKMBAQ9DizR2qCrgwtgojnlRpU7KKQtSjLAOAfTEFBAQaS92Mmgp4yttXQkpffUwY5iV3WoiwDafI5iZEUdoGjwm+AesBWUMgaWYuyDATo5jKLsiG2lWOet3T9K/LMUQm6gkLWyFqUZSBAN5c+hw2xrRzzXHTlX5FnjkrQFRSyRtaiLAOeyW0T77b4oTwVzE4Ev7DPIXfM4ej4xDBOSbWf+rl1ztWXFhWFi2bL3UXiFzi6DfwQVOlXutvCHX/8LIS6+GraR03XX8iG2PWJcXLGntq8YyPdOVLcL0VtfE2mLOod3ikTUZrKH4eKHRcvli+HPHNUgq6gkDWyFmUZcET2W7QGgmjHxs0EHZfeLVRxsQD2mMXZ3DD2kKTfa9rIthcTpimZsCjsm2bLrfNASxCjb/3gT8FwLOPmLDW3OUsZV6yvdYJ/VQX9DJJ9pOhH8/nTqduXhmAzQe9jMrhwJi3FmJKgO7UsQmrnj4PFjosXw5dHnjkqU9yP2FZDS15+TxnkJHZZi7IMOCL75kMRV3sI6RsHKEEnHy+4mh97zOLM+PuGm6+d4roIDa1uvTMnBUWh6TRfxiURnNLDl+GS2VDtU6tltAKniQek7tUOWwOB4lDXP3BtQ/Sf3Btn2tr4y5zcTNA9BT9oNQ9KKQk6rTY5tobx42Cx4+IVTVDLN0e1bS0Cu2FoUdvWvj8Wxi5vWyOZ5wOz9S1dSEEvkw6q7DEH0itnbcw1Unm4bo3RzClb4IjIAOVIBaLIFzAkgOr694k/aNc/pNlhOXdbzpVXaImOKw+vjWOk7CIFjhiBK2pxegcvdrQC1SDUD4IZB4sdF68UBT09jqqwTAR2w9CSiaCr2EUha1GWQQSdL76gXwhADXTIZ4fIGXscxh/jWtNm1qQ+thqjIZ40CaSuwwOiDSXyAnY/gi8MR1qHsKyvynglvorSc+1FJ91VC+jjxjhdHxtnR3tY1Ak2FPTbg1v2a8QPwYtdXMdGZhwsdly8Anx95Jkjez9KGlqEBX1I2/kG0xHaYHFuWRPSaTv3hpaAuYWFa3RJ29ACIl6DxtDiXvw4GI1x7EjIsjS0iMeuwsTLNx0FARK7bRP0i1br+yZPkO2J2Sx7KONOcTOf4wXrEs0x4zvbn1OlBJ3tuhyRcQfrB6EXKpXqCvN61duPL6tqO6jqBf2q/PpBOwPeG4Zx/GeOPQMUm88hpQvvazqCPrTqg+UdWqEu4sdhY+fEK8DXR545MvejrKFFQtAnQ4T3wPBggkQe/W+8BYYW3tzCwjO6pG5ogRD0GXkUCwt6w6Zmj2XGhhYQM5JvOgoAKHZbJei3z5fmZdSyhwGgoB8yWfbDcMYdheb+2rimTepH9dnU7yggKQolf568jci4g3G8QldegVj32/rnqoquyWqAJBL0V3R10la/R+uba7+jwKZzSKmL1jutovs1NUFHWjaLEjt/HDJ2brwCfH3kmSMr6JKGFglBj1ji4ScW3Omjji2AuTe0cOYWFr7RJXVDC4Sg1+1xPSp8Det9gF9bTjI2tIjG7lXHHViuiIozpiMeULHbKkFHj8xm79v2TqCC/spk2V+jMu5naM168kSP6+fW+6zutQ+SEYUDRgeeo94wgK3Qa5VPukIv6NUr0tL3lf+bkKBXPEHHuaK/N+s39+QmHC/wc1ix5z6ppCjoixixY8YBY+fGK8DXR545cil3/Ie4oQVa0Ie0n/YYkc29oYUzt7DwjS6pG1ogBB1ffJHhG+E/6pYNaEYSMbQIv5+lyds2niQZ0xEPqNhtlaCjCH31zXDmhAWgoNeYLHstOuP+9ueUPmu9W3Z9almW+/5HQhQuWuaX96UfkXEHm0PItUfmjQKdQyrBNB9JuXdoy
l37a8zX1+gZxn3/sznHrvt4kmrKvRGTjnbHAWMXiFc3/DiWZ46h+1Hc0CIj6KP6KGoYf2CyuTe0sMcsGKNL6oYWKFNczPOYhrugTwHNSCKGFnFBJ09eZJJkTEccwGK3ZYJOHrq+TbrDCfqPGXeCN8M4wtfpdLC0xvWJ5cw14nMI0nNfEyIz7glch87xR/C+Za/PPeP67d44Xp8a9C2DAEfPHPCLTHGQsQvG6ytsxMkzx+D9KGFokTTFDYOb7od4ix7mDG6KSN/Qwh0zYI0uaRtaEhf0mW3jmGZpaBGfSD/wx4szkXqmIxZgsds6Qdd6cfkTCjhB/zHjTnGC/Q2NBr5WB2iF4CyBhK/DrslOGpEZ9wQEvU0vwE+6UPfh8CYZpLMzLOpv2pO7RN+A46VJ3pQ8uOmVdLetaTFbuug4XOz8eAX5+sgzR+5+lDO0SAj6cDG2LXvAD4/JO/SGDW6KyMDQwh/74IwuKRtakhX00XJqTemjZoaGFtHYtck79M7KEXTfdMQALHZbJOhFEo/nyDWrDzhBj8m438xP38jBG/lAj1v0K1odjMg7IALB6/Cib/bZB8nIjDvYdfhSJu/NO9iHs3okx/S13eOqRhMS1OPxqVed//Yarc7xOyCCDTg+m71b9mVCuoVl3q1z+nVmTwZR4yCxw3DjFeTrI88cuftRztAiLOgzPO8PzoPpiJFtWRN7MgY3RWRgaOGOGfBGl3QNLckKegOty8+dN0bZGVpEY3e40vXaqtZ2BL0SJehgsdsiQX/Gmw9aZqTE+QAT9LiM+5PhbDm4N0739ub+K+U7a7Kw3ZWD4HWI1uc9st+CkozOuMNdh6tCQacVbfHOi4K3UwZdk1feEd6F4dpz/hinT/P5G/2yAcdiH+8jMc0mOr4o4TCiWIZSYMmVRa173+pR4yCx05h4sXx55JljIGMmY2iRLCyzoNkHBrOpfT4e0C1POTe0sMcsOKNLyoaWpFPug7sJNbdnaGgRFguaTK9RMxJjOmIAGLusRVkGHJEv/Cqkdfltwh1Q0OMy7t4K/c8eisn82t9QuLT9XJG4oFPQpU90xh2K4yGtX0EeVcqf+Nh1cngrdLR2x3US/Jnlfu7nijbhWOziOiSE1YHLMRTJJBqXNHCDEncS8VavgXGQ2GlsvBi+PPLMMfQKTNzQIinod+HKMhjEKb0thpYIUfCNLmkbWpI3xQ0sHK4sDS0y6VxsZVxp8aY4wNhlLcoy2ISng1xW89sQ28oxz61F/xV55hi+H4UNLaKCPqU59XcrWFkGAy3Q8dm8G1riBJ01uqRtaEle0LVzfCpLQ4vURIoW6Dg6rOmIBVTstB0U9BzW298Q28qR55UJUo5dJhDmyNyPsoYWUUFfWucjnIVwvABMOkIbNGxcm4Qiz4YW9lhj0mUBo0uqhpYEBN2L3YB8LC3LCWVWhhaJibTcWdFXlwHTEXjstJ0TdB6ZiF3K2AWOCr8BzP0oa2gRFXRS4nVouduUfcPA+7llkYpjFHk2tHDmFtbQwhtdUjW0QEwas+FwaOPKvfS9jxe7JR6z/dfmWRlaxA2NVV3XP6ly86Yj8NhpOy7oCgoKUGDuR1lDi/A7dKYJi8au0Kf2cMHWm8mzoYU9Zg0tvNElVUMLhKDXLQdUx73Y3eHaAnb2hhbh2L2sChVvkyFnOoKPnRJ0BQUFEKj2qRHYDUNLJmk9FbsoZC3KMgD4B1NQUICBgKDHmT1SE3RlaBFEPK/UoGIXhaxFWQYA/2AKCgowkLwfMxH0lLGthpa8/J4yyEnsshZlGUiTz0mMpLALHBV+A9QDtoJC1shalGUgQDeXWZQNsa0c87yl61+RZ45K0BUUskbWoiwDAbq59DlsiG3lmOeiK/+KPHNUgq6gkDWyFmUZ8EyKBxd4n0WogB8HmJ0IuodD7pjD2/GJYZw+4cP6uXXeYM+JisJFs2Wazm6gYjPR3RYcr8f2iivx6mI2tt3yoVIcaQdDGjuWI4ckyqLe4fKnwWab3DhU7EwPtBzqxUPJDAcvzxyVoCsoZI2sRVkGPJMunh/TEvQCRhUXd2CPWbydGsYe+t8x7vVkLyZcUUNBUdg3zVapR3eFknoI/eTqIbC8Orq+YpuwuGhYlu00+JDjWMLFK0qlhwDH73iBwGlQElI7fxwsdrh6BULP7ONvRfwI0/+pfTYIUuOoTHE/YlsNLXn5PWWQk9hlLcoy4JmQdvVpCTr5eMHVF9ljFmfGyRsu+WMcaUurodW58tKCotAkLf6apCTCvtkr4pYC4f4X8BzbpJlAO9T1fUzaVI5xbys5jkx5SZYjj6Rai45DrTyYcbDYmSb5IFcpfhrr7f9cWAwC6XFU29YisBuGFrVt7ftjYezytrW+mWLKHf9ZJl1T2WMWT8Zf/HFirEnl4TouR7mULXBEZO/AbGk4IUGaCUT0v4DnqJGlOW0YxIEm23ETKzmObL1ohiMPeLFbkjabAzu4fGXGwWJHBb1IC042zVJ0lfM8c1SFZSKwG4aWTARdxS4KWYuyDHgmZMZMR9AL5LNDVqzsMYs1mZTejPmbNrMm9bHVGA3dGtNSbXxxPUbNZVo0WyFlgOdIceUs2oOoW5YmyREJ+u0Bm2o4cBa0LODFbkoyDNqQfkSOg8WuRC7MfZJ5uI2IGkWeObL3o6ShRVjQh7R6aKh7KlMSNo+GFq1cIeVe6XGnEH77hcevHvFNiv+T9YlxcsaeS9jQAiJeg8bQiuq2Nhrj2JGQZWloAYmd9oFLvwZ6reGfaXs/IxW7LRJ0OnukI+gUbJY9lHHHfer3np5O5jg2SzTHjO9sf06VEnSnc++DWep2e62f3sPCcfyIuW1p92kpjnQOYVpARHQnTkDshlZ9sLxDK9RF/Dhs7GjG/dlsXly2es2wqueZI3s/ShpaJAR9MkQIdk8lTVsmpGlLLg0tZdy8o0abdzwSp2qEoJOGqkTQj+bzp1NjzZxL2NACIegzppA7i4ZNzR7LjA0tELFzm7MEFZ35GbnYbZGgP5MUdIqCfshk2Q9DGXeNPGoZe7RJ/ag+m/odBSRFoUTeMiOu6E4rhd6gJ8ZRK+ivWhScxZ8MxxJ6OEFrAX/d6nJkkYjYIS2bRYmdPw4Zu1s34/7Q6qH5v/dDdgUC6XFk70dJQ4uEoEc21F5auNNHHVsAc2loedVxF48rogRXq/ZVIUrQycM3EfR740xb01d+DhI2tEAIet0e16PC1yBd8ha4922mhhaI2NH2qe3QP1dHr5bxikk/lIzdFgl6kwQnRUF/ZbLRr6GMO16hnz79pSt0kit6n9W99o0yonDg6N6D2e8+JLxCf+Uy7qvwQ4tGXqFTXhIcL/BEWOx5Kn4QlZNOROwWMWLHjAPGzmnxVDJ7B6QFc+ipJc8c2ftR0tACLehD2nxzjMjm0tBSoImyNl6Dl8n3CEGv6W7KHbfSXuP9NfciHbsEDC0Qgo4vvsjwkS55AEYdBiKGFojY0fm0vAou0Sv6J/6oorDKxW6LBL1PLsMUBb3GZKNr4Yz7k3GN/nw7IdmTd8uuTy2/p6+EKCAZ+MKfXaINxcTm
EAKG1+MKP1yGMbOxW1qD4Nh1VwIuRx4JpaMbMelodxwwdugiJc9fJTr/d8Mp2jxzZO9HSUOLjKCP6qOoYfyByebT0EIFnCTVme88iMWFCPqNcbo+Ns6O9rAwECRsaIEyxcU8j2n4vd5U1qjDQMTQAhG7F5rkDOU6qfOhjJdMcrHbIkGnr0X6ZuvbSQRO7H7KuO/R9yBPxhO+TqeDpTWuTyxnrhGfQ5DW0TW58xzdDZs5EuGI9Dz0zIKB9JyqAABH98W5x5FHJoYxyNi5GXc3dhFGgTxzZO5HWUOLpCluGNx0P6QPnVN4U0Q6hpYCfZh++U7QqSjQP+8Nwzj+Mzfu3ZMJG1oSF/SZbeOYZmlogYgdilv59cN/MPPwqRcqleoKT7FSsdseQb/wjYvf0YUTu58y7qygNxr4Wh2gFYKzBBK+Drumq3VpCLrPq6JH6/nCW9XJcLykJB7chWuknie2pUuL2dJFx+Fi52XctSbHl0WeOTL3o6yhRULQh4ux7b4E8jAm79AbNrgpIiVDS5u8h+2svhP0V5K2pYKuHa1vro3TG+9kwoaWZAV9tJxaU/qomaGhBSJ2KG6vuv4YFnTtqqrrBVprVCZ22yPoDtJLucdk3B9XNbqoPTau3zTtZm44WxCmaPWzcLLTotfhRd/suw/OTfOyGL2Yhef4WNNrzFufm/npGzmYTdibS5zjs9m7dRPtLEceSRVdebfO6deZPRlEjYPEDsPJuGv7ZuuC8P3hdQkE0uPI3I+yhhZhQZ/heX9wHkxHjGzLmtiTMbgpIiVDy+FK12urWvs7QW+Tm9URdOzI/XuzfnNPJmxoSVbQG2hdfu68McrO0AIROxS3SqSgf+q1yiddoUvFTgm66HFcxr3i7ig5mhvzvT3DOHVO3FmThe2uHASvQ7R27ZH9FmjiuEXP0KWSSTdecIDniEhVSSFYesE9Gc62CrQ+P8e7hIbOwk6UI9n1U6K7fliO3/ECgVP+tO59q0eNg8RO8zPuWNk9vjzyzJG5H2UNLZKFZRY0+8BgNrXPxwO65SmPhpbHl1W1Xa65Oh4l6PSkI+h/jfn62jDc1UTShpakU+6Duwk1t2doaIGIHUm5d8Ip9wo1y1VJCOVil7UoyyCCTmqCHpdx91bo2hFtzvLmnlna/nJWXNApcMb2ltaySOrVj8bwqrh9Wuh16K3QF5YD13AlyrHY9epycBy/4QWDxsRtLqMxq9fAOEjsND/jjvg+YL7hRGauOTL3o6yhRVLQ78KVZTCIUzq3hha2VGOUoNPmCzV9VcDv+67f7o3j9alBt80mbWhJ3hQ3sHC4sjS0QMQuzhTHGuckY5e1KMtgE54OclnNb0NsK8c8txb9V+SZo38/ShtaRAV9Sh8t361gZRkMtEDHZ/NqaMHVR9yFXYSgP/rtETXt7AwLw5v25C7zEja0JC/o2jk+laWhBSJ2zhIptG2NFXSZ2Gk7KOg5rLe/IbaVI88rE6Qcu0wgzDF0P6afcl9a5yNmJyWTjtAGDRvXJqHIoaGl3Fn5DR0ZQfdTggRX3n9yjVZ4eFMzQcKGlgQE3YvdgHwsLcsJZVaGFojY0cIyn3qVjnuxa+svZXYnkWjstJ0TdB6ZiF3K2AWOCr8Bv0DQSYnXoeVuU/YNA+/nlkUqjlHkztDyWUUr708y+z8WCgWkD4UCXeVV+DKwvqD/MU6f5vM3+iVhQwvEpDEbDoc2rtxL3/t4sVviMdt/bZ6VoQUidm7pVydiXuwOV/qqUNC9JzbR2Gk7LugKCgpQ+AWCzjZh0dgV+tQeLth6M3kztLysChW6qQkXEdWZcu6xK3Ttfu5vfkrY0AIh6HXXkEN13IvdHa4tYGdvaIGInaZ1cHMW9wnMj90hbc7iRVI0dkrQFRQUQKDap0ZgNwwtmaT1VOyikLUoywDgH0xBQQEGAoIeZ/ZITdCVoUUQ8bxSg4pdFLIWZRkA/IMpKCjAQPJ+zETQU8a2Glry8nvKICexy1qUZSBNPicxksIucFT4DVAP2AoKWSNrUZaBAN1cZlE2xLZyzPOWrn9FnjkqQVdQyBpZi7IMBOjm0uewIbaVY56Lrvwr8sxRCbqCQtbIWpRlwDOhXf9SKf3qF2XC2xE+8E6EQOkfjdtBUz+3zhvsOVFRuGi2TPOSVkBgdpfwSIAj3VUR0W/tDpcPJXtIpDiysaM7gCLKUyVRFtX//ePGoWLnF0+75Y555JmjEnQFhayRtSjLgGdSMvu4BEKogB8HKLEjrUqqpBCAUysgqOikxsWE1LgY2fZiwhU1FBSFfdNslZDcYUUn9R/6ydVDYDl2VpRjoAKx1+BjJsuRid0+emTBHH/q2AgC//ePGweLHS1vjpj1+WMeeeaoTHE/Qhla8oucxC5rUZYBz6QUrt0XBpTYkY8XXKmPVvNrh8K3JC2Y63jGXFoNdMCWlxYUhSaRuSYpifBs9ou4docZKs0Iz7FDCh1V/NKTDmgLzjFuhSHHkYndvvlQxBUtfiq6BQHm948bB4udU9b8EhecZI955Jmj2rYWAWVoSQwqdlHIWpRlwDNJXdDLpLsorbdfDtXbH9JahbgFM648XMdL9aVsgSPC8MBsaZguUYNmUv0gWI4aKXTkN3tysSRtKgc2emiR48jGjmShKUce8GLH/P5x42CxoyJeJAUn2WMeeeaoCstEQBlaEoOKXRSyFmUZ8EyQKNweBN9JBgEjdgXy2SFSHtcRz+kzsECCPrMm9bHVGA3dGtNSbXzxqlxLvsETy5HigzmmmFpL/DFEH3IcQ7H78goz+oAXO+b3jxsHi12JGAT2CS/2mEeeObL3o6ShRVjQR2NsXGl8N55HQ4tWrpDSoPS4U9Aj2qeyRpf1iXFyxp5ThhYOIoYWkNjFGa4K1K6E/xOp2G2XoJPLMLo5kAvIFrckG0171n+Ee9YPaUMg0tBxaVnW+M7251QpQaede0u4G5LX7JAFPEeCx9UqeCGih5bB8o48tMhxDMbuotUKxxF+DmF//7hx2NixWfaIjHuuOfKCLmVoERX0hk2NAcv48VwaWspIBgrof23N7ZMaFnTG6HI0nz+dGmvmpDK0cBAxtEDELs5whVSkhh1Ln7Kx2y5BL3XRM3Mr3CCBAaDYHdJsNBL0VxShkKCPyTt0NJPgiXRUn039jgKSolBy3qTjd+jojktS0B2O6OD1RX851AJAooC0YLaQ5sjF7vb50ryMSLUkInb+7x83Dhm7WybLfhuRcc81R17Qpd5/CQs66ai28PukhsdzaWh51XEXjyuiBFer9lVEP3TW6HJvnGlr4y9zUhlaOIgYWiBiF2e4YvrhysVumwT9AmtAsRfxNMkAUOycZvUoFJUoQR+hRcHEnowtd+Pa+6zutW+UEYUDqnu36OG53+on9g6d4NXNsnfQoqAaYEhEYeGLhQRHLnZo6WP2fsxeQoD//ePGAWP3zGTZnyMy7rnm+AsEXSMd1eqWHT+eS0NLgSbK2niScVYR4d/HN7rgVtpr4xipg0jHLmVocQAfuzjDFRNQudhtk6BTdKOmSR+AYlejgSIp90445a7Npvb5eEA
zRO+WXZ9afk9fCVG4aNFcu3Zx2eo1i/3wzAnPEaP8UdM/A38TSds2nHSuPEcvdsWvvhnO1iaUjm7EpKPhePnoM8uAfkTGPdccA4IuY2iRM8XdEQmIGc+noYXO994kEynoBMTocmOcro+Ns6M9LAwEytDCQcTQAhG7OMMV/qHDK/JEJhe77RP0r++NOHBi52ajY2NEQBcLQ2s6WFrj+sRyejKLzyFIz1kd+PnhGYIjRZnW0WHA3oMAHJnYFcMWlWzmEMjY/ZxxzzXHsClO2NAiJegz245wVnnjeTS0FHCmFs81Pwm6Y3S5Nwzj+M/cuHfHlaElApsZWiBiF2e4ckxxBenYbZGgOzfTQ8TSjgGc2LnZaOcznEXBQAt0fHk2Gvi6HKCL0lkCCV+HXZO78NACPZybhufooBqcRmg2TCNZMhmOEbHr/ZR5gAD7+8eNw8XuHzLuuebIC7qUoUVc0EfLqTUdfTueQ0NLm7yH7ay+F3TG6HK0vrk2Tm+8U8rQEsaGhhaI2MUZrtCJQqW90ld4/SQTuy0S9Gezd8skpGMAJ3ZuNpr6HD71Kh2+mZ++uT8yaNi+PWeKVj8L6nwXvg4v+mafeVguohkkbFdJgGOZrNNf3fW6x5H6Vd6tc+fnRTmysSsW6Ugae7QDv//MngyixkFih/Fjxj3XHNn7UdLQIi7oDbSGO198P54/Q8vhStdrq1r7e0FnjS7rE+PvzfrNPaUMLWFsaGiBiF2c4Up7xM9h5SpN80rEbosEnewcKUXtHGEBJnZ+NtrZieDcYU+Gu+Xg/RzNIe/epXdnTRa27XwVvA7R+rxH9lhgLXjooXvt4ce0LQTHV7ypAl2Xzgt1n6Ozo8TdSCLKkY3dM9400zJT2dIV+P0X7BEErwB+zrjnmmP4FZiwoUUm5T64m3BG6NB4Hg0tjy+rartcc2eZuJS7Z3T5a8zX14ZhuBualaEljA0NLRCxizVcUVRIRlQudlmLsgx4JrG1HViAiR2Tje7gWgHuDeav0Kf2cMHm/pa2nysSF3QKzPKyVepG2o7gOX7gVzyrCI5aA9d88DeGinJkYveFX+G1LiNM0knUsuB+f2/1CsaLx88Z91xzDAu6sKFFzhQ3sKyIpLs7nldDC1uqMd4UR40ue8b1271xvD41juiwMrSEsKmhBSJ23xuutA/830jGLmtRlsEmPB3ksprfhthWjrtXbTITQLRPlTW0SJZ+PeeKjwTG82powRVK3IVdvKBTo8vZGRaGN+3JXeYpQ0sImxpaIGIXZ7hy3I6fOL0iEzttBwU9h/X2N8S2cty9fhCZQJgjcz/KGlpEBX1Asg9Ly6JZCC8dERjPo6Gl3Fn5zZEYQX9c1eh7sYDRRbtGKzy8qZlAGVpC2NTQAhG7gOHKi92rXj3EX6khXjx22s4JOo9MxC5l7AJHhd8A5n6UNbSICvrSmgyHtveK1TMMBMbzZ2j5rOo6qQSHpv0C9rPUCgW6yqu49p2A0UX7Y5w+zedv9IsytASxsaEFInYBw5UXO1IetuCUh9XEY6ftuKArKChAgb0fJQ0tooJ+N7Qsyw4bBgLj+TO0vKwKFacIxJXugGqBt8oLGF007X7ub35ShpYgNja0QMQuYLjyYsc3cNHEY6cEXUFBAQSqfWoElKElMajYRSFrUZYBwD+YgoICDAQEPc7skZqgK0OLIPJs9vhX5DJ2WYuyDAD+wRQUFGAgeT9mIugpQxla8oucxC5rUZaBNPmcxEgKu8BR4TdAPWArKGSNrEVZBgJ0c5lF2RDbynH3snyZAGLbmoKCQibIWpRlIEA3lz6HDbGtHHfPh5MJIArLKCgoZIKsRVkGITIXD6WIYk0s4Ir/PH4WaHmf4I4DF3inDMJQIx0FzhvsOVFRuG3iHSXOrtCvvmleRjSoTIJjO5qjdod3lJA9JFIcacdGWmyyeHCBq2iGi4YmsVPG//3jxqFiZ3q4RRybMbu68sxRCbqCQtbIWpRlEKBSxLLQjyr54wNK7Mp402cN13AgNQFqfk0AD0NczGI4fNe0kW0vJlw1SkFR2G8h0UNSQPogdenxT62KE+Xo1HyYyXIs4QIWpdID5dVNTdD93z9uHCx2uFoHQg+XEyO1O/pRdVfyzFGZ4n6EMrTkFzmJXdaiLAOeCZole/vf9l/WwMQOKVy143UiK5NqD8F+6ENvilxaDa3OdYASFXRSjKpLSkzemrg2Y/NH4YPg2NGrZdw6QD/UONCqjGOchZDjWGKKRV/iulTpCDrz+8eNg8XONMkHYbdv9oq4/UUaPS/S46i2rUVAGVoSg4pdFLIWZRnwTJC2/STnYGLX1gtOiR+tQGsttkMd8XxBx5WH69YYzZyyBY6IANCmTrS6UbEVXqLDc6zQnozVYEsI2jdhgPsmyHFkBb1vppZyZ37/uHGw2FFBL5Iik13a+CLFnhdpcFSFZSKgDC2JQcUuClmLsgw4Irdm62c9h+pZr69crXP7JIRb3CJBH9VJl7+ZNamPrcZoiCdNAqnr8ItI+SVNvJfoBwt4jldksMyMUDCdDeU4IkG/PXCWq0T50hH0f+jYCBa7EiG0T2JH2RUjrtg8cwwIuoyhRVjQGeMKh0Fj6FYDz6WhhTXqPLZXTPlQDq7pZX1inJyxJ5ShhYOIoQUkdtoHLv0aTOZy41Kx2yJBfzabF5etXvN7VYeJ0avepl3rNSzoH3joJULQydSCL8QlOhjf2f6cKiPoF60WnjiQBhafv+gbZx7wHLVPvVCpVFcdjQd6Zhks79AKbyHJkc4hJcyLzh7pzCHs7x83Dho7mnHHvSlL3W6vlUbPi/Q4cvejnKFFQtBd4wqHGbkXiaDn0tDCmlg6ur5iGnww8EwvR/P506mxZk4pQwsHEUMLROzc5iwhRffH5WK3RYLeNB9aPRSX3reKDhOjtv65qqIY4TfLbfIOvbOKEPThYmxbpC/QqD6b+h0FJETh9vnSvCRLWSToz2gGSUzQWY5ojY6utkLgDToRBaQFMyoWMhxLSOCaLbJifSbp6NQE3f/948bBYqcxbZ0O0OxfiujmkWeOXHMWOUOLhKBHdkKv2+O6cyqXhhbWqNPWXzX850vwr/JNL/fGmbY2/jLnlKGFg4ihBSJ2tH1qO5SNZ8blYrdFgl4yewekjW8oBc0CJkYFvXpF2teiW+twhZ6KV7XwO/QZTrcPzumyp35uvc/qbvsgcVHYR+ueHpFwdNt1kxR0liNaodcqn5Er9IUvFhIcL/DNVOzh2DXJGjY1QV/EiB0MryDctk4PZr/7kNoKPSWO7P0oaWiBFvSBfyqXhhbOqEOW5lf6KvhX+aYX3Ep7bRwjdRDp2KUMLQ7gY/dKHsPKq+ASnRmXi91WCbqTI/q2BzNUjIhJrELSKDQxXYtIgWEsyOX4btn1qeX3Y5YQheJXnzAlKff95FLuLMcKvd5CpjiStm046Vx5jl08LfbJjZViyr0Rk46G4+WjTzPuXTr/p2KKS48jcz/KGlpkBN0xrkSdwh+5NLSEjTpoxRf4mxjTy41xuj42zo72sDAQKEMLBxFDC0TsXsjyCA298n
8TMy4Xu60SdDI7fkVcfAxAY/Thn4t4YKa4w6mioTUdLK1xfWI5c43UHFIk2aGU5hDCMc74x96DABxJ7Ojrrb7ZCsUxkzkENHZuxt25Vn98GINAehyZ+1HW0CJpihtGOKu8xXseDS1ho84H+2MErOnl3jCM4z9z4949qQwtEdjM0AIROzSRll8/whMpOy4Vuy0S9Ca9mR7oQj0OUO+XSUA+6SJWI6+vghn3Kb0w3/Ern0YDX5cDdFE6SyC53RZkZedkbxPL8rEc4wSdZsM0kiWT4Xjpx+7CN6B+zwsC7O8fNw4ZOzfjnqagp8eRuR9lDS0Sgu4bV0KnnGx8Lg0tQaNOaJHHm16O1jfXxumNd1IZWsLY0NACETs0kb7q+mOUoPvjMrHbIkHfN/Gz8kUrnMZkAROjjr56JO+XaQq6jKJVc37kZn76Rg6W1jla7sxs7OjAmKLVz8L9IngdFskF+EwuROrDeTB7oZ+C59jWX8rkuMNzpH6Vd+vc+W9FOT6bvVsSuy93JE0fjvf7z+zJIGocJHYYTsYd6d1lkfANrQryzJG5H2UNLcKCzhlXePiv1/NnaAkZda5CW0h50wve/PT3Zv3mnlSGljA2NLRAxA4JdyVG0JlxidhtkaBrePdBKWr3AQugrYV4J0LB2YnwWdV1/dO9vZ4MZ8vBYGJZw6Hlve25syYL2105CF6Hz3hDSct0X8SSnTLh5xd4juiaXOFj56nF4+juKHFnSlGOxVDs0t0pU/e+1aPGQWKnMUuCW7T+wXz7oR/JM0dO0OUMLZKFZahxJQBP0HNoaAkadZBsfwT/Js7Y89eYr68Nw3A3NCtDSxgbGlogYkdS652YlLs7Lhe7rEVZBjyT4gOuD/DtkgCuVgBW8RpNer2sChV/Q5e3etUGCzRdTvyrc2n7uSLB6/ALv95qXTpX3j6uZRGRj0iA4yFtzuI8tfgctQau+eAbi0U5FruB2g5p1bLgfn9v9QrGi4e7JHBrknTDCek8c+QEXc7QIinod+HKMr6g59HQQuEZdbxcGQvW9LJnXL/dG8frU+OInlSGlhA2NbRAxO4fTHGysctalGWwCU8HuazmtyG2lePuVZvMBBDtU2UNLaKCzhpXgnAFPY+GFgLPqFPRI/ScM72cnWFheNOe3GWeMrSEsKmhBSJ2dHuaFrNtjY7LxE7bQUHPYb39DbGtHHevH0QmEObI3I+yhhZRQQ8YV5h0BL9FPW+GFo0x6jzW9BqjB4+rmttBiTP2aNdohYc3NRMoQ0sImxpaIGJHC8h86lU67sUuMC4cO23nBJ1HJmKXMnaBo8JvAHs/ShpaRAU9YFzxDAOz4XBo46qwzuovd4YWxqiD1ufVAgZdpVe8IrCssQfhj3H6NJ+/0S/K0BLExoYWiNi5JV6diFXYI7acr2jstB0XdAUFBShwpV/lDC3C79B544q3yqtbDryOxjkztDBGHTT3UzitWtxVHmd6wbif+5uflKEliI0NLRCx07ROjWms48eOHxePnRJ0BQUFEKj2qRFQhpbEoGIXhaxFWQYA/2AKCgowEBD0OLNHaoKuDC2CyLPZ41+Ry9hlLcoyAPgHU1BQgIHk/ZiJoKcMZWjJL3ISu6xFWQbS5HMSIynsAkeF3wD1gK2gkDWyFmUZCNDNZRZlQ2wrx93L8mUCiG1rCgoKmSBrUZaBAN1c+hw2xLZy3D0fTiaAKCyjoKCQCbIWZRlwRPyiRhF9czzA7ETQPRy6ZVEjSjfd4d0WZH9F/dw6b7CnREWB7rCgFRCK+6Woja+JcCxfPeLqhSGtH43xLiFCTYoj7dhId8ewx9/wgoEfo7hxqNhx12doZ5CLPHNUgq6gkDWyFmUZcERo2eFSL6JAAAMosSOVHaq4/E9nRWsCBLsZuvUQ0Iw5su3FhKlTJSwK+y1avwLv76VlGZMUdJ8j7RoQFvSGTTkuZTmWcJGOUukhePwNLxD4MYobB4sde32S2h39qLoreeaoTHE/YlsNLXn5PWWQk9hlLcoy4Ig45YYvwxX8WECJHfl4wSWVO6QIUMXrn+qCViwc4zYRS6uh1bny0qKCbj4UcVUq/Mxy0GoelBIVdPJBONI/IwTdeh9gZZjIcmSJRJPSkqs2OQ618kgiduz1+Wz2i7hex0+lvyGQHke1bS0Cu2FoyUS8VOyikLUoy4AjQifMolvQLwaQYlfWdVznhxQB8hoheaA9BQa4pwCuPFzH5SiXsgWOyNuEA7OF/sQ0kxd0h2NNj0m54z/qlq1JcsxI0JkYxY2DxY69Pkv0qbP5Uz8ICKTHURWWicBuGFoyEXQVuyhkLcoy4IiUyEvXfa+gXzRgxK5APju0RQ7BB3NMwXT9m1mT+thqjIZec3Sp6/DL45ikoHMcibpHCDrBHZYGOY6IyO3BbfiYQyYdG8Fix16f/9rgCQLpcWTuR1lDi3jp18bQ4l4eOIAye7BI0dCilSukfzE97hT8qt8MHtsrt3zo+sQ4OWPPJWxogRH0aLOH5WGUqaEFJHZMjDgwMZWK3RYJOsUPGXfQFrcvfhPDx9Uq0BAPN3caLO/Q6meBV0OWNb6z/TlVRtAvWi03U5ukoFNQjlTKYwR9Ztv4JpTiSOeN0kXwmAO82LExihsHjZ1zfZZoJ6vLNAQ9PY6coMsZWkQFfcYVbPcBZvZgkKahpYwbr9Ro45VH4lQNa0JH11dOg4+j+fzp1FgzJxM2tIAIeozZA7fbQTi3JtkaWiBix8SIAxNTudhtm6Df/pBxhxS7Q5qNRgevL/rLoRYAmjDRPDmjE+moPpv6HQUkROH2+dK89FY9iQu6w/EV91iOFPTRcmpNR/RQgmPJLHWbLbNVDBxzSETs/BjFjYPFTvOuzyZ5h44UIR1BT4kjJ+jkQ9jQIirodXtcH0YJOpTZg0GahpZXHXfxuNLJm69V+6oQIeht4stt45TavXGmrY2/zMmE339BCHqc2cOyyMcUxS1TQwtE7JgYcWBiKhe7bRP05x8y7pBi9+qGBT126dVK8G9C88rCn0jr59b7rO41ZxYWhX20EOh5MpC4oDsc28w6nUcDLYjOHamQ4HiBn1GKPbraYY45JCJ2ixixSyB2mnd93qLFXb/VT+UdenocQ4IubmgRbp+q8X3PPUCZPVikaGgp0GRgG282KZPvESl3MkTMPLiV9to4Ruog0rErI0GPM3tQQR9Y1iBbQwtE7JgYcWBiKhe7bRP0/g8Zd0ixq/kZ9/JHjaxhWZCUZsNJdb5bdn2KtM99/yMhCsWvvulmwBIXdIdjjVxr0Sn3wd2EPjPLc+wyj2Pd8KNZQunoRkw6OoHYedfnxWWr1yz2w+HLM0fmfpQ1tMiY4iIFnQDA7BFCSoYWOtlXnJap0YJOcIXtLjfG6frYODvaw8JAkLChBULQ48weQ7Jkb4DGTsTQAhU7J0YhOD8nF7stE/QfM+6AYudl3AnKpP4KC/b6HFrTwdIa1ycWzU7LzSFFz2qUtKC7HOme9Jq+irxvB9iuAsHxizHffIWNOJkYxkBjx1+fdHHHI88cQ/ejuKElEUGHMHsEkZKhpaB/4I+Xn
wX9g/wn94ZhHP+ZG/fucMKGFghBjzN7UEzJw2aWhhao2LkxCsKNqVTstkzQf8y4A4rdK/8ipBq8xWgGSSMZpEYDX684ZeQsgeR2W/TcqSNpQXc4PvpV4yJ/n3M8hcpwdG6mB5x6YI95JLWlS4vZ0gUfO+76RAv0sLM2zxyD96OEoQVe0IHMHhzSM7S0yXvYzupnQS/QCldH65tr4/TGG07Y0AIk6JFmD4IRzrhrmRpaoGLnxSgAL6YysdsyQf8x4w4odm7GvUzWsK/uev1mfvpGDqjH4906d35+ilY/+B0QgeB1WCT31bM3USYt6MxbBY1JuXscB+QmWzo3mzjHZ7N3i1c7+FmZPeaRVNEVL0YzezKIGgeJHQZzfRbRDBJhAc8zx+D9KGFogRd0ILMHh/QMLYcrXa+tau0fBf1KX9F5aH1i/L1Zv7njCRtagAQ90uxB4Dx+ZmlogYqdHyMefkwlYrddgv5zxh1O7LyM+6teKxRQzBzpezLcLQfOLgx3drmzJgvbdq5EwevwGW8iQdcfloWLEj5E3xMyZ2rBtwq+oHscl9ZkOLT9V6+iHMnOkRLdOcIef8cLBHyMFuwRdOw09vp86CEteIi4WPPMMXg/Shhakki5A5o9PKRmaHl8WVXb5Zo758cJ+uOK5ne1v8Z8fW0YhruhOWFDC1jKPcLsQTBxopWhoQUqdl6MAvBiKhe7rEVZBkEuP2fc4cTOy7h/FHRdX3m1ArzVK1oU4DoJ/uSytP1ckeB1+IVfabUuyZV34JZGSOo6DL1VCK/Q74bo3rIBOLK1HVJtXMLFyFu9JhE7jb0+L1ulbmTJlTxzDNyPMoaWZExxYGYPBukZWlh3dIygI62g64o94/rt3jhenxpH9EzChpYkTXEYbsY9S0MLUOy8GAXhxlQydlmLsgw24ekgl9X8NsS2csxza9F/RZ45Bu5HGUNLMoIub/aIQGqGFlyhxM3aRgt6xcsTnp1hYXjTntxlXsKGFohJI87swZzL1NACEzs/RkG4MZWJnbaDgp7DevsbYls58rwyQcqxywTCHAP3o4yhBUjQvXQElNmDRcqGlnJn5TeAYgT9cVWj78Uea3qNLVd5jVZ4eFMzQcKGFohJI87sgTGx/IKvWRlaIGIXiJEXOwz2IU00dtrOCTqPTMQuZewCR4XfAP5+lDK0CJd+HWJPx2Q4pKs8zzAAZfZgkaqh5bOq66StI1KBAvbs1AoFqgwVt5AoOqiSjsfOCvCPcfo0n7/RLwkbWkAmjTizh59xJ8jK0AIRu0CMvNgFYiocO23HBV1BQQEK/P0oZWgRLv3q9vGgWuCt8sDMHgxSNbS8rAoVp9DFlbuF1NECd5VXccfdvPz93N/8lLChBWYVEGf28D3u9Fs2hhaI2AVi5MUuEFPx2ClBV1BQAIFqnxqB3TC0ZJLWU7GLQtaiLAOAfzAFBQUYCAh6nNkjNUFXhhZBxPNKDSp2UchalGUA8A+moKAAA8n7MRNBTxnbamjJy+8pg5zELmtRloE0+ZzESAq7wFHhN0A9YCsoZI2sRVkGAnRzmUXZENvKMc9buv4VeeaoBF1BIWtkLcoyEKCbS5/DhthWjnkuuvKvyDNHJegKClkja1GWAc+k2IzZYcECrvjP42eBbD7wO5EF+qcO6QYa3M23fm6dN9hzoqJwSzg6JYibLdO8jChmAcSx3K7qeo3urzgkx+ESRzfHc8O4JoXdpTjSLo1egcmLh1Iqncg07Q7vlIkoTeWPg8WOuz6/+ih24aaUueaoBF1BIWtkLcoy4IiQGgj9qBoILKAEvYwruBOFo73CC1W/jJODIS5mMRy+a9rIthcTriCloCjsIwkvIVnAvY/2TbOFj8OKDsOxXEPM0P/aGun9pyOGoa5/Z4Yx3zshrVrkOJZwUY5SiRaYLGJ574fLjCXXuCSkdv44WOy467NL4/hTm2kQpMZRmeJ+hDK05Bc5iV3WoiwDjsi+2SviNgJmZI0AF1Cr15pe7dCaAE6P8JdQiV6/BOXSamh12unJgaigk4JGXVKmsElkvRlRvgOGY0evIoIfJPHQIYWOKqFnlmPjnvx5LcuRLZuJlK+3H1VkLKnWomOSRYkZB4ydf33emriWZjPc8iLXHNW2tQgoQ0tiULGLQtaiLAOOSJc2EOhFVlT2ACTobb3gFuGlgl7mO41i+IKOKw/XrTGaOWULHJGHFacx0AFzzAGGY0X/xB9VUryIvE3wmz15IMn2tTGX5cgKOtK56Jqh8GJHK1ANQv0gmHGw2LHXJ61SVWyFl+h55qgKy0RAGVoSg4pdFLIWZRlwRA7Ieqdotr6tIA3Us55pUV8gYx2+0ygGEvRRnXT5m1mT+thqjIZ40iSQug6/mFX5gWmGzsNwpM1SywzTjzBHgrVhyHJEgn57QFMrt7ERhBe7uI6NzDhY7Njr85JkVxDr5+BP5Zkjez9KGlqEBX3QGFpR3dZG43PLmhCfQO4NLeWKf8yCGV+fGCdn7DllaOEhYGgRjx0br4+arr+wPXTozzJGLKnYbZGgaw9mqdvttb5v8QQTo1e9TbvWe2fCGXfHFEfaRCzRwfjO9udUGUG/aLX8iy+iUzFYTwG9UKlUVx6vx9UqdCES/CHTrxRHOoeQ1hbPZvPistVrhlUdfg5Bz1yD5R1aoS7ix8Fix1yf6Pml+PyFFu2hiTLPHJn7UdbQItychSnkzqJhU8PAcgsMLewxC2b8aD5/OjXWzEllaOEgYmiBiB0u5o7NSMGJlDFiycVumwRdO0BRKX37Bh0qRm39c1VFMaq6in4YzrijGXO4GNsW6Qs0qs+mfkcBCUG/fb40LxmOEYs8MOPfFbryCo51//D1RX8J2Phd7JE36VIcS0js0BoHr16b5kOrh+61XkjRExE7pGWzKLHzx8Fi51+fSNCf0QySlqCnxJG5H2UNLcLNWexxPaofesN6H2BlmGyBoeVVxx09rkKqwIzfG2fa2vjLnFSGFg4ihhbx1Z8fL6QTH+SFbeBvYoxYcrHbJkF/MPvdh3RW6AW9eoWXrJ7t+zUiGz3D6fbBOV321M+t91ndax8kLApoKWD2fBk4iMpPg63Qa5VPd4Xe0dHjSzjLh7E2nD5/Ehwv8MRf7OFpsWT2DkgL5hTS0WjuX8SIHTMOFDvm+kRTZjdFQU+JI3M/yhpaRAUdE4gSdI28+qpb9hYYWgo0GdjWA7cjM45baa+NY027F+nYpQwtDqDEwo8X1YnyKvgwxhix5GK3RYLepXFJxRRXoPdXxUt71cIZdwcLcjm+W3Z9avn9mCVS7sWvPr0aNSJ7X+GfgJpDyLVXdTv6lT9qlHUAN3N0BWLIcyQhLFF23XC6NqF0dCMmHe2OA8WOvT5Jyn0/xZR7KhyZ+1HW0CJjiosUdII7fC/m3tBSoHdkJSzo3viNcbo+Ns6O9rAwEChDCwcRQ4u4WPjxeqFLwEJwAzBjxJKL3RYJuvNcGTFJsgCN0Ydvkgtn3B3c4VTR0JoOlta4PrFGdFRqDim6mUyk51H5CPjrkKIcqp2jET2nDg4AjsQQ4MQxwhyQyRwCFjv2
+vxlpjggjuz9KGloSUTQZ7a9DYaWAs7aYm0ICbo/fm8YxvGfOX0XhqEMLTwEDC3iE6kfFzSpll8/wg9jFNSIJRU7JehCx06669NdskZl3Kf0wnzHr3waDXxdDtBF6SyB5HZb9FyqkXqemKBr3mrdx5Ph6LkUx0satAe8OG8yxzySyvJpMVk+Og4VO/b6pFk+LcVta2lw5O5HOUMLvKCPllNrSh9Xcm5oaZN3sp1VUBS48aP1zbVxeuOdVIaWADY3tIiLhR8XNKm+6vpjtKC7y0KZ2G2RoDfNy2LsotUDlEcFG74fV67AMRn3m/npGzlYWucjvCrAjg6MKVr9LNwvouZMcl+hyw8z7Zv9iK0WGtx1+FImHBG1MrnQXt08xOOqRg9uTtkLT5zjs9m7dV8g7Jt4zYOOQ69OkvLhvFvn9OvMngyixkFix16f1IfzYPZCP5VnjvwKXcrQAi/oDbS2O3ekIt+GlsOVrtdWtdA7dH58fWL8vVm/uSeVoYWHgKFFNHZsXJCgV2IF3VsWSsRuiwT9Fj1TlkomMZ3GA+j+wjsRCt7OETbj/mQ4Ww4GE8saDi3vbc+dNVnYtnMlCl6Hz3hDSYsuzNFF2MO7S0qhSRPsOlxhjrg63KteKxTQdek8tqBrkj7JILInexhnchzJLpKSs4uEPf6GFwicHTF171s9ahwkdtz16eyU+WmXEwhS48iZ4uQMLUmk3Ad3E2qQzruhhe6YrYXSZez4X2O+vja89JkytPAQMbQIiwUTF5Jy78Sk3N1loVzsshZlGfBMaJ2H7rc2HLBaAZ+4VkCUx91boWuDBa5l4V+dS9vPFQleh1/49Vbr0k24UyR1HdKGLBX8qPKBK9evXtwZxFuhI0GneHLOiHIsdv06D8UHfBxOXiZSy6KBa1a4AuCtXgPjILHjr8/9/r/UIYFBWhyZ+1H2/VcypriBhT0C22FoiSjbyIzvGddv98bx+tQ4osPK0MJBxNAi/roEg8YlzhSH4S4LJWOXtSjLYBOeDnJZzW9DbCvH3as2mQkg2qf+TkHXzvGprTC0oAVfpK/KGT87w8Lwhp64nWWeMrRwEDG0SE2kTlycpV9o25rmn5OKnbaDgp7DevsbYls57l4/iEwgzJG5H2UNLUCC7qUjBuRjaVlOdiLPhhaEcmflNUry0mWBce0arfCenC2lytDCQ8TQIjGRenGhhWU+9SodZ2PHbX0WjZ22c4LOIxOxSxm7wFHhN4C5H2UNLcKlX4fDoY3bFtNVnmcYWOIx23/1mmdDi4Zf95H2hwSeoSUwrv0xTp/mTs0nZWjhIWJoETc0MnFxSr86EfNjF9j6LBo7bccFXUFBAQrs/ShpaBEu/Wo5oLO/t8q7w40VbFhTRFaGFu1lVaj4BSH8VR4/rmn3c38PijK08BAwtAjHjotLBzdnCZmRtODWZ9HYKUFXUFAAgWqfGgFlaEkMKnZRyFqUZQDwD6agoAADAUGPM3ukJujK0CKIPJs9/hW5jF3WoiwDgH8wBQUFGEjej5kIespQhpb8Iiexy1qUZSBNPicxksIucFT4DVAP2AoKWSNrUZaBAN1cZlE2xLZy3L0sXyaA2LamoKCQCbIWZRkI0M2lz2FDbCvH3fPhZAKIwjIKCgqZIGtRlgHPhN1tEQu44j+PnwVarKlcITtKQn/VoDF0t8/Uz63zBntObrfFfuiYAwxH3cNhPMc9WvkVv/GU4kg7NpICk6aHYK+uJHbK3OEdMbPvxsFiV2wyu4H2S1EbX/PNUQm6gkLWyFqUZcARIfUQ+lE1EFhACXoZVzev4dI+ZdyopeY1avEw8/fDjmx7MeEKUgqKwn6L1kB4DhzzgBL0AkYVF7OI44gE/RT3Zvkry7GEi3SUSrjAJK5kgdALFyVJrpZFSO38cbDYsdfnBXlgSUnQU+OoTHE/Qhla8oucxC5rUZYBR+TZ7BdxC8NwSV4WUM1Zanq1Q2sCvJJ2t1d6sDxv3R7XnSqUS6uh1WmnJweigm4+FHEVi37gmAeUoJOPF1yQMI4jEvS1cyTHseSrm2mSj8tw1dCkqk2OrWH8OGDscFPpL5J4OGg1D0opCXp6HNW2tQgoQ0tiULGLQtaiLAOOSIkqQFO0H8RGx2294Jb4KdASvKH2xLjMkSPouPJwHfdRXUoXOMJ/HJit4DEHSEEvk4KEcRwZQZfjGBL0IimRzSOpfhCDmH4QZBwsdl3a4IL08sDU0hL09DiqwjIRUIaWxKBiF4WsRVkGHBHZBk+bHB/qK69kX7izoQ9H0GfWpD62GqOh1xxd6jr8ou2BQscugBo8kc8OKUgYyxEJ+tGadPmT44iCd3tA35mXSKfG/R95QeAfOjaCxe6AOASKZst5UElL0NPjyN6PkoYWYUFnjCscRmPcy5j4BPJoaOFNLB+4fGi4X1e5U3Brg69PjJMz9pwytPAQMLQkGTvWsCQVu60S9C/8cZmGoL/qbdq1XsNi94GHXr4RdNzmyRrf2f6cKiPoF63WRdSxB8g2viTjHs+RmuL2cNVhKY50Din5XCIy7gnMISg+g+UdWqEu4sfBYvdglrrdXsvllZagp8eRuR9lDS3CzVmYQu4sGjY1DCxzamjhTCxOg4+gKjwSQSCCfjSfP516yTMMZWjhIGJoSTJ2jGFJLnZbJOhN8g4d3VkpCHpb/1xVUYyqZXyM3y93Vt8Jujaqz6Z+RwEJQb99vjQvb8PHLAAF3WkBFMtxz9h7Op4bpC+QDMcSErtmy1u94paG4b4eiYgd0rJZlNj542CxO0AzfsmLV4qCnhJH5n6UNbQIN2fxjSscGtb7ACvDJKeGFtbEQltwtkMZ3atV+8pJpd0bZ9ra+MucVIYWDiKGliRjxxiW5GK3RYJ+ix6S+61+Ku/QC3r1irQnfiWdDfXaqhZ+v6yxrZnr59b7rO62DxIXhX30MNnrho9ZAAq60wIoluMNTre/nRj3+IsExwusc8Wet8J5jsi4JyN2ixixY8aBYvdg9rsPmazQU+LI3I+yhhZRQWeMKzxG+I+6ZefU0MKaWOg9WV4Fl3k0XUgEHbfSXhvHSNlFOnYpQ4uD9GLHGJbkYrdFgq5dXLZ6zWI/MjQeoGL0iT8qJI1Ck+81v7OtD29uebfs+tTy+zFLpNyLX316NQaOfQAKeo1ejN9x1PD8ca1BcOx6Mt6PyLgnlY5uxKSj3XGg2FFyxZ57faaack+FIyfocoYWGVNcpKAT3GFpyKehhTGxvOBlBB56Df82zs/dGKfrY+PsaA8LA4EytHAQMbQkGTvGsCQXu20SdIKoh2QWoDH68M9d6auIv82dW4bWdLC0xvWJNaInpOaQImNRKYbtKoCC7mTcHURzRPiD518Ajl/kNtNiMu7ZzCFgsQtq3C8yxQFx5ARdztCSiKDPbBsv7PJoaGFNLGjyKb9+RLtwnYlJuzcM4/jPnGbOMJShhYeAoSXp2LmGJanYbZugowX6985aqHfoJCCfdKGuEeNDVIjcuaXRwMcDdFE6SyC53RY95vLrhS9FOEF3Mu4UERyvjSf88Re/8pHh6Ez8D266ITLjnli
WT4vJ8tFxqNhlJejpcWTuR1lDC7ygj5ZTa0ofV3JoaGFNLEgUXnX98VtB147WN9fG6Y13QhlaAtjc0JJ07Lzlk0zstkvQi2gGCdtSOMDEqKOvHsk7dHr7lFG0as6P3MxP37yfZ+eWKVr94HdABKLmTHJjPZMbjD3mASfobsZd4zk+rmp04X5vnBxhysYfekKU47PZu8UrHLqui8m4J+bDebfO6deZPRlEjYPErmleFglHh1q6hWXS4Mjcj7KGFnhBb6C13bkjFfkztLAmFiQKlR8FHW9++nuzfnNPKEMLDwFDS9Kx85dPErHbJkF/QM9cxGz6HYDEDu9EKDg7ET6ruq5/uqnpJ8PZcjAbDoe2NRkOncfJO2uysG3nShS8Dp/xhhI0V+4HjnmACbqfcec5Vtztrm+nhrG3Z3hve0Q5kl0kJW+XU3TGPcGdMnXvWz1qHCR2SONamCN+4rwo4dCh+IVStHnmyN6PkoaWJFLug7sJNUjn0dDCmFhI2rbzfcpd+2vM19eGYbgbmpWhhYOIoSXp2HnLJ7nYZS3KMuCZXLZK3YicFw+oWgFY4WrU2fCyKlQOvR/xVuh1y4E7vSxtP1ckeB1+4ddbrcuD4DEPMEH3M+48R2+Frr09nRjG6ZN3RpQjX4ckOuOeSC2LBq5Z4UbIW70GxkFi59Qe6eJHlQO3XMcPr0tgkBbHUMZM3NCSjCluYGGPQF4NLa6J5R9Mcdqecf12bxyvT40jekIZWjiIvP9KOnbu8kkydlmLsgw24ekgl9X8NsS2cty9apOZAKx9qoShJRlB187xqbwaWlwTi/OYHdr6hOEK+tkZFoY37cld5ilDC4fUBf0fYucun2Rip+2goOew3v6G2FaOu9cPIhMIcwx0P5QxtAAJupeOGJCPpWU52YkcGlo8EwstTvKpV+m4ny7T2Hfo2LF6RDY1EyhDCwcRQ0vSsWMMS8Kx03ZO0HlkInYpYxc4KvwGsPejpKFFuPQrb1zxDANLPGb7r15zZ2hhTSxO+VBHuj1Dy2OhUEC6USg4q78/xunTnNRw1JShJQARQ0uSsdMCW4RFY6ftuKArKChAgb0fJQ0twqVfeeOKt8q7G6IxG9YUkaqhhTOxdHCDD1cJvFXeldvfwz1zP/c3PylDCw8BQ0uSsdMCW4SFY6cEXUFBAQSqfWoElKElMajYRSFrUZYBwD+YgoICDAQEPc7skZqgK0OLIPJs9vhX5DJ2WYuyDAD+wRQUFGAgeT9mIugpQxla8oucxC5rUZaBNPmcxEgKu8BR4TdAPWArKGSNrEVZBgJ0c5lF2RDbynH3snyZAGjbmoKCQvrIWpRlIEA3lz6HDbGtHHfPh5MJwArLKCgopI2sRVkGPJPifsnfevDVN83LcDNDoJ0IuodDTStfPeI9I2EdvMO7Lcj+ivq5dd5gT8nttqC7XYsHF25jXx4JcHxsr5jdFj5mY9vdUSLFkXZspFyKTXbXDIMkdsr4MYobB4sdy+ui2ULX54+7nGCQFkcl6AoKWSNrUZYBR+SC3UvYReKAps+k2hPj1iwIVVL+h1TajxB0px4CmjFHtr2YcAUpBUVhv0V5kT5IpG5hkoLuc+zo+oqph+ChYVm2U/NBjmMJF3YolXCxSVLXou/VtYjlBQI/RnHjYLFjee2bZgvHMazoeeYIaIrbVihDS36Rk9hlLcoy4IgctJoHbgW/WxPXLWwmKHbkgzakJ3+GBZ1WLBxbQ1ynqqHVaacnB6KCTopvdWlJzUtclypJQScfhF2bNBNoc6UPMMakb8IYF1uW48iUXtw3e0XcGuKnPhcQYGIUNw4YO59XkzySNSNKduSZI+C2tdSgDC2CyLPZ41+Ry9hlLcoy4Ijgoj+uLtDqRsVWeIkOKXZlWqyvpkem3GlPgQHuKYArD9etMRqTLnCE/3CaWPXNhFPu+E+HI1ma04ZBHGiy3bI1SY6MoHdpU4gf+1xAgIlR3DhY7DhehFtUM7I8cwQsLJMalKFFEHk2e/wrchm7rEVZBiEyri5c0qR0iX6wgBG7Avns0BUrUb6woDNd/2bWpD62GqMhnjQJpK7DL7qyM00tSUHnOBJcOYv2IOqWpUlyRIG7PaBLcsqoaLZ+6HMBgX/o2AgWuzCvAxJBHnnmyN6PkoYWYYzG55Y1aXw3nkdDi1au4H7NtIt2jKGFNb2sT4yTM/akMrTwEDC0gMRO+8ClX8O91sqdgvtGUyp2Wyro6LP4/OV3x/MB2eKWZtyplIcFfWjVB8s7tPpZkDZP1vjO9udUGUG/aLXw5EhnjwTnEIIXpgXQR8yUe0fSuVIc6RxC2yM8mKVut9f6qc8FBNgYxY2DxS7E6+vH2EEgPY7M/ShraBFFw6bGgGX8eC4NLWUkAwX0v7YWb2hhTC9H8/nTqbFmTipDCwcRQwtE7NzmLEFFf/SL8MvFbnsF/RnNIMkKutMe51X/1GIEHc2TMzqRjuqzqd9RQELQb58vzUuylH0mKdyEBZ1rAVQgb9LDcBZ/MhxLSOzQczJdvR6gu6sU0dkjEbHzYxQ3Dha7IK+IBFKuOTL3o6yhRRQN632AFWASP55LQ8urjrt4XBEliDO0MKaXe+NMWxt/mZPK0MJBxNACETvaPrUdutSvVu0rp/WtXOy2V9C7iQu60x6nzazTWaAJc+FPpPVz631Wd9sHiYsCepw0e4RVk/RqTFjQX7mM+6qsRaBuOb0MJThe4Amj2CO31oPZ7z6ktkJfxIgdfOyCvA4i3irkmiNzP8oaWoQxwn8QU0fceC4NLQWaKGvjDTVxhhbG9IJbaa+NY6QOIh27lKHFAXzs6HxaXgWX6GXN62UvF7vtFfTi837CKXenIX2NxCEm5d5wUp3vll2fWn4/ZomUe/GrT67GPr0YkxX0mp9xf1zhh8swZjZ+gNYgOHbx1N+l838qcwgXo7hxoNgFeV208MI1iDxzDNyPMoYWOdwRCYgZz6ehhU72ZIcsRdjQwphebozT9bFxdrSHhYFAGVo4iBhaIGL3QpOckblO5+fkYrelgp60KQ7DzUbTV1c1fRWYj9jrc2hNB0trXJ9YI3pSag4pkkyRaeIXXX2zFZpE4DlqRM/9t+kMkJ5TFQDgSF4qOzH88WEMAv8wh4DFLsAL6Xk4B5FvjjGCLmJokcLMtiOcVd54Hg0tBfow/eILepyhhZpe7g3DOP4zN+7dYWVo4SFgaIGIHRLt8usH+2DmwxF0udhtqaDTLJ+W2LY1DCcb/eibS/m/iWaKNJJBajTwdTlAF6WzBJLbbYFXedR2RBA8Dc5RI2aOSD1feKs6GY6XdLJ/wKmHNAWdjVHcOFTseF5dM1LPc80xXtA3NrSIY7ScWtPRt+M5NLS0yXvYzsoXghhDi/sIfrS+uTZOb7xxZWgJYHNDC0TskGi/6vrjt4IuFbstFXTqw3kwe6GfgBO7GqdwXsr9Zn76Rg6ol+PdOnd+YopWPwsnOy1sziT3GJoemUxRkil3l+NjTa8xb308jrMJe3OJc3w2e7duErppXhajF7BJ+XC8GM3sySBqHCR2LK+LvtmP2Mal5ZtjvK
BvbGgRRwOt4c4X34/nz9ByuNL12qrW9oQgztDiPYKvT4y/N+s3d1wZWngIGFogYodEu/KjoMvEbosE/aJUKrXwhgs8Uzo7ZZLaiaAF/N+MoD8Z7pYDZ7eFu8nizposbMc+JnodPmN+LXZxl6igexzRRVgle2KovnscEcXzIYazsBPlSHaRlOguklu0NsDH/R94gYCP0YI9go4dywtdnj38wqSUwkNLehy/S7lvaGiRweBuwhmhQ+N5NLQ8vqyq7XLNnfPjDC3eI/hfY76+NgzD3dCsDC0cRAwtELEjKffO9yl3ydhlLcoy4IgcuBloEqP9fqK1Anj/txa1QkeLgonbuIRgafvLWcHr8Au/3mqxvBIVdI9jxX2rQK9DjyPSBAr3HhTlWOz6dR5ovY5uGg7wQIy81WsSsWN5dd1rNYXXCulx/DWmuIFlRSTd3fG8GloYZ3ucocV7BN8zrt/ujeP1qXFETyhDCwcRQwtE7P7BFCcbu6xFWQab8HSQy2p+G2JbOe5etclMANU+VcbQIolzrvhIYDyvhhZcoYQ+UMcZWvxH8LMzLAxv2pO7zFOGFg4ihhaI2DnxCW1bw3AFXSZ22g4Keg7r7W+IbeW4e/0gMoEwxxhBFzG0iGJAsg9Ly6JZCC8dERjPo6Gl3FmR5o5BQ8vjqua/+2ONPddohYc3NRMoQwsHEUMLROxoYZlPvUrHudgVmNp/orHTdk7QeWQidiljFzgq/AawpV8lDS2iWFqT4dD2XrF6hoHAeP4MLZ9VXdc/yewfMLRUmCKwnLHnj3H6NJ+/0S/K0MJBxNACETu39KsTMS92jyicSPMLBedJTTR22o4LuoKCAhTY0q+ShhZR3A0ty7LDhoHAeP4MLS+rQuWQHgYMLewqjzf23M/9zU/K0MJDwNACETtN69SYxjpe7K7cmLpnRGOnBF1BQQEEqn1qBJShJTGo2EUha1GWAcA/mIKCAgwEBD3O7JEalKFFEHk2e/wrchm7rEVZBgD/YAoKCjCQvB93wbyhDC35RU5il7Uoy0CafE5iJIVd4KjwG6AesBUUskbWoiwDAbq5zKJsiG3luHtZvkwAtW1NQUEhdWQtyjIQoJtLn8OG2FaOu+fDyQRQhWUUFBRSR9aiLAOeSXG/5G12ZY85wBX/efws0I0j5UpV12vh4ryzse3utqifW+cN9pzcbgu6YZJ2OUxsp4zfRe4QkW2vmN0WHiwPIzmOLBd218w3vGBwh3fERDTb9MfBYldsBncDRZSnyjNHJegKClkja1GWAUfkwt/3yh3zgBL0cgEpXQ2XdijXdL2A/tcO/E0Ny7Kdeggj215MuGqUgqKw36I1Opzi2H1cDuEh9FNQgk6qV1RxiaOOrq+YeggeLIu0Zjm3JpIcGS6krkWf1rX4jhcInJoVIbXzx8Fix/Li4sghzxwBTXHGntz/12+FMrTkFzmJXdaiLAOOyEGreeBWm2SPeQAJOlLxaofWBHgl7W6v9GB53jHpKTDGhYiXVkOrcx2gRAXdfCjiSgikcFM0QQ1O0MnHCy4m2SbNBNp8PxoNCzr5mCJ+chwZLs9mv0gqk/xQ+hsCtKrk2BrGjwPGrlfELS9wLw8ujhzyzBFw21pqgq4MLYLIs9njX5HL2GUtyjLgiOCiP64usMc8gAS9rRfc8kwFWj65HeqIR5Ptlq2RysN1a4xmTukCR/iPA7OFP1IR9DItJkmW5l6zJw9U0AcWaZcgw5HhUqK1NJvp9YMYxPSDIONgseuSPpsa7djIxpFDnjkCFpZJTdCVoUUQeTZ7/CtyGbusRVkGITKsxiUp6If6yiu36NTUj2xxiwUdad7MmtTHVmM0xJMmgdR1+EXbVyGCtwe3EedhOBbIZ4dZlV85i3YfQ7Lsa2BpkOPIcPllHRvBYkfrghbNlldG04kjhzxzZO9HSUOLsKAPqaUjmI4AM3uwSNHQwhl1ylePTL9mBh+4rChJFF5V9SrXkE0ZWngIGFpAYsfEKAjXlLU+MU7O2BNK0EPHPmBi9Kq3add6DQv6Bx56iRb0OzK3LNFMMr6z/TlVRtAvWi2Sjab3XSncKAiyje8L077pI+aZc0p6XkhxZLiUcJcnr4kjC/g5ZGjVB8s7tEJdxI+Dxe7BLHW7vZY3cbhx5JBnjmxzFklDi4SgT7Cr4z0wDGb2YJCmoYUz6pC1Q4SgO40/kFocrlaVGud4UYYWDiKGFqDYuTEKwDNlHc3nT6fGmjmlBD107AMmRm39c1VFMaqW8TF+h95ZRQu6szAa1WdTv6OAhKDfPl+al+5SttRttpgVnwtAQefaNxXIm/QQRk5TShmODJcmeYeO7rJ0BB1p2SxK7PxxsNgdoFmj5KRUmDhyyDNHtjmLpKFFQtAjO6GDmT0YpGlo4Yw65CE7LOi0NWcbj7+in7jSP5mTytDCQcTQAhE7JkY8fFPWvXGmrY2/zDkl6KFjH0DpaL16hXvlYIU7XKEnq1Ut/A4do245ff7q59b7rO62DxIXhX202OlRqbvAelDshZ8sAQX9lcu4++8ZWNCXsZoUR4bLLXpg7rf6qbxDR9P/IkbsmHGg2D2Y/e6Du0Jn4sghzxyZ+1HW0JKMoAOYPVikaGjhjDo1PTLlTu/V8gqdrSDZuMJLwld3KagMLRxEDC0QsWNixMM3ZeE26GvjGCm76raWoqCTp98KSaPQ5HsttKdLwy0A8cMlwrtl16eW349ZIuVe/OqbTGavG34RCyjoNT/jjh5fPiJ/oYnDSp4j5XJx2eo1i/1w+BJKRzdi0tHuOFDsKLliz3uzzMfRQZ45Bu5HmZtRRtBH9VF4GMrsEUJKhhbWqEOsLGFBf6EJNJxHe9RrV229c1jwNtMqQwsHEUMLROyYGHFgTFk3xun62Dg72sOiTqAEPXTsAzRGzEvlsANcI3pOZ8ihNR0srXF9YjlzjdQcUiSZIgdfYSMOnKAzGXek55zLxoObcQfgyHBJxwH+D3MIWOzCcyMXRwd55vg7BJ144iKcVRqI2SOIlAwtrFGHSnlY0NGkVH79oMLxqut6+2Pl64YytPAQMLRAxI6NEQvWlHVvGMbxn7lx755Ugh469gH1Dp0E5NN7TYUW6OGM+8Jb8TQa+LrE6T5nQG63BVnlOTfZQ3iZByfofsa9osfouZdxl+EY4oIW6OF8dFJZPi0my0fHoWIXsdjppZGFSI/j7xD04WJsuy+6eECYPTikZ2hhjTqvZNaJFHSk449ULA6vHl/0mp/YVYaWADY3tEDEjosRA9aUpR2tb66N0xvvpBL00LEPmBh19NUjWbTSNHsZRavm/MjN/PSNHMwm7IWHlwcj8g6IQNScSeaLZ9Ms4j97t/jJkj5Ds4ATdDfj/lhjJwefI8bE8veRiHIMcCmiGSRsUUnMh/NundOvM3syiBoHiV3TvCwSjvt8HHnkmeNvEPQZTjH8f/autStxHYquls4npC556CgWQfQqL0EF8e2ooP//H92cpI+kD5XktLWQvdYdS8pd457TZjenO+cMD8PpCAoMs4eA7AwtvFGnS2/KWEGvBWJxWjc/7k9904s2t
IiQMLRgxE6MUQDelEU3rv07X756JzdU0I8qlUoHNlwciccikMQOdiKU3J0IH3XTND+8W+fBcrcckIvwkO6VcfXuypnczr2Vg+R1+AScyD0G1yHdeVH5fkeJgqD7GXdyEdZpIVim7z5HI1j2UMhyFLjckWdnajz9mhcK3B0xTf9TM24cJXZkbuwAR3hS4eMoosgcf4OgM9x60i0Az+zhIzNDC2fUYW6dhJT7iZvO/TAXp89mkFbThhYBMoYWjNgJMeIgmLL+WbPlmWVZ3mb0DRX0bdvFtngsAqtWAKh4g72hel6Uajv+V/zV661XysK7Ph/nwZJd8jr8hNdbnWPXPfyzmg8Kgu5n3GtenxZ2HfIr9Edh7pTlyHM57lR6cfaiVGpZtCZeAx2DW72GxlFi59av6MGTihBHAUXm+HsE/Sqy98lANXtwyM7Q4hl1WJOFhrkIKTpvuCqZz9UXs3vaMN2ZSRtaBMgYWjBil2SK401Zf6yz1xtrb7lv7bKTGyroP0Mhq/mtiHXluHnVJnMBVvvUPAT9mj0+vznhyjIGjtkjBpkZWlyjzn3QBlH8m9zHb7ol6uQERKJKnsTdJbo2tAiQMbQgxE6IEQ/elHVwAKL+ajx4S3Qt6F+gkPX2V8S6cty8fhC5QJrjLxD0R+fwgtstyqUjcMwePDI2tPBGHYNLud8vGuy9GCta8mHW3W88k9V5zdtqqg0tAmQMLRixC8XIj13IlGWckdU5bEin0IL+Y+QidhljEzhq/AbwpV8VDS2ygj6cQJFXx9umHBgGkMwePDI1tIhGHYMT9Jrf1dgtK+rVw3g3G7WFt71ZG1oEyBhaUGInxiiIHW/KIvjP2n+YzV7ZBy3oGhoamYMv/apoaJF+hz68JVP/xLOt8Ct0HLMHh0wNLaJRx4hboZN1HjT+COpbvSyCvSna0CJCwtCCEzshRkHseFMW4GYWbFzTgq6hoZE5dPvUGGhDS2rQsYtD3qKsAoR/MA0NDRxICHqS2SMzQdeGFkkU2ezxUxQydnmLsgoQ/sE0NDRwoHg/5iLoGUMbWoqLgsQub1FWgTL5gsRICZvAUeM3QD9ga2jkjbxFWQUSdAuZRVkR68px87J8uQBr25qGhkbmyFuUVSBBt5A+hxWxrhw3z4eTC7AKy2hoaGSOvEVZBSKT8lbF3xpz1O7Ydno7EYJiTbAd4R12IoQ71gs7aJqHzmGLP6e22yJo+ndX+baIk4qg33+UWAmjag12VUQayjk+LpQ5Blw+ByR20WaGqeyUuYIdMTHNNoNxtNiV28FuoPL2kdeUWUSROWpB19DIG3mLsgoEIkfcXtct2+5U+jE7X9EEnbYqqdPyP26tgLCi0xoXE1rj4mI+v50EJS4MaVHYIo8pwIv1dCrDBspBavUQjGqJPLE0oHZkFeoeNIK6Bx6gigfBoTNR5chx6TGO37UnRoFbsyKidsE4WuxovY6BW6+D1pzMSNAz44hoiltXaENLcVGQ2OUtyioQiGx32ttehck2lbx2tPcRmqDTH89QKZlV8+tGwvfoQGGEJsyYj06LHPDlpWUFnRZt6rFSjEQh+lsxNZzQGtA0zPoJq3vwQlv6nkaeWhyH/rgm/NQ4clwubag52c5E7FhVyXGklQc3jhi7fhlaXtAeEMdQlyobQc+OI+K2tcygDS2SKLLZ46coZOzyFmUVCERADvyS0fTH902C1AS9SruLsnr71Ui9/RErHT12bmnl4SYs1R+VCxzBHy4vonlxco4m6OQZxStBVWI9Hrrhrn9M0IcObZegwpHj8sQ6G3aiS/S0+kEME/pB0HG02PVYswvWA2JgZ5Zyz44jYmGZzKANLZIostnjpyhk7PIWZRVEyIg9ILZtO/INHLEr0Z8nVMqTOuKNWA4T2vhOnUlz7LQuRl6NabUGT59U8i7tTryeI3HcMRd+6Wi3u1+kje+ILvtaIA1KHHkux+yFQsV9r8Ahl46NaLFj8l1mTOmVmY2gZ8eRvx8VDS3SmI7nQplQHymYIrI0tAgmlgTTDje+/Gv9PeBPaUOLCAlDS6qxq56UvLruirHLW5RVECEjCnpMd2LUFrc048561r/HiB1rCEQbOj46jjO+mgdzqoqgH3U6cIM92e2j406/HVV1HI4vZvf+eVHvgqqXWNOm5zBHhmuai1DhyHMhMSw/fQqdDWN5YYA8dA0fr+hDV+I4Wuzu7Eqv1++AALDZIxtBz44j35xF0dAii5bjzPlGHj5SMEVkaWgRTCwJph1ufHc2e9i3ltw5bWgRIGNoSTN2rCcuFXTF2K2zoMcs8jAFfYdm3EHQX0iEIoI+pu/QW3M6kV40p9dBRwEFQb98OraPaeK9bd91+uSa7KfU9a9rfizq5DqsV+EY3qGfLGIF3WtjpcCR50Ji+ERmkKwEnWjZNE7sgnG02G2TmbFCY/dE0++ZCXpGHPnmLIqGFlmM6TPJWOjDAkjHFJGdoYU3sSSZdrjxG+vAWFr/uJPa0CJAxtCSZuxOF91TNw2qFrt1FvTtuJw0oqC7zepJHGpxgn4xd5zJfDJ2vI1rb9Om3/pJWhTIcsfuU6mr2P1t2tL3m9S09GsFs35KW/S+kOtwYZqNRSPyDp3Cb2Mlz5HnQmLYy1DQbxPEjhtHit2dPejdsRV62w7W6SKKzJG7H1UNLdKgq7emMw8Np2GKyNLQwptYkkw73Di00l5ae0QdZDp2aUOLi+xi564M4VAtdmss6EQePqN0EQW9wQJFU+4n0ZS7Mb2eH46HLEP05syb147jeO9/FFLu5c8BvRor7JrsRXswY12HH/CjRlNFLPneMIPejAEmLisFjjwXmnLfyjDl3kpIR3vjSLHrsbmRziEDep1mmHLPhGPoflQxtKih6e6+CJCGKYIhG0MLb2JJMu1w4+fW/nLPOtj9A8JAoQ0tAmQMLWnGjvueUuyM9RV0oufRF1qYgu5m3L+MkbdYGDnXw0dn3Jw4F2xYaQ4p00yRyzXGKIB6Hb4H507NRcxv42XcVTjyXH7ZHIIWO5cjfVCx7QrBwO58EzsMZMfxS0FfzdCihqtI2jYVUwQgI0MLb2JJMu3w4zeWZe39N7NuvJPa0CJCwtCSZuzY99hySSV2xtoKes+O1XNEQXcz7t7PSBaFgizQ4fJsteC6hL1d7hJIbbcFXeW12U12xxa3PLDeodOL7oMt1A1q7vgq467CkefCsnxGhlk+IyHLx8axYscJOrOMUYS/VWSOXwr6aoYWNUQeXlIyRWRpaAlMLEmmHWF8d3l+Zu2f+ye1oSWE1Q0tacYO4Am6SuyMNRX0o4E9iNlqYWAKuptxd30OH2adDZ/P9l+9rwxbc2fifbgmq59b5nyXN2fS+YLcYmV4mw5rg6NO1D2Mw/HEXNzTd+jsMquSK7LhfoXn6GfcAbIceS7Mh3Nn9yPfSsuH8+Ycso/T+WQYN44Su7Z9XBYTR1kWlsmC41eCvqKhRQlNZz4MDaViisjQ0MKbWJJMO+L48q/173z56p3UhhYREoaWNGMH8AVdIXbGGgn6UaVS6diDSuWIrs/7kNSsRFbpaILuZdz9
nQhuNB4sb8vB26HjOG/+pXflTG7n3kwjeR0+Ab+Om3yAnRcVO/oKHYsj7LYoubstPghB88Pblx5wDDLuFNIceS7uTpnvdjmhwN0R0/Q/NePGUWJ32bE7wHHgj2Rb+jV9jl8I+qqGFhVM595jSYA0TBGArAwtvIklybQjjP+zZsszy7K8Dc3a0CJAxtCSZuwAvqArxM5YI0Hf9rKY21QTGL65DhUE3cu4E5xArQDv6SpYvV7PR7cX3F/2OA9yRZLX4SdsEO24NTrKd1AbIZrIRKuHACreYM6A50WptuN/hV+hPwpbhGQ5Cly2Bj+pQ4KD1oSrQ+KvXkPjKLFz65D0goVqVs1ZsuKYLOgrG1oUQPS8FRlMwxTBkI2hhYGZWH5gijP+WGevN9bect/aZSe1oUWAjKElzdixQaYhKrEz1kjQf4ZCVvNbEevKcfOqTeYCrPapKoYWedw6MXqeiinCRSaGFgrXxJJk2uHHDw5AGF6NB2+Zpw0tAmQMLWnGDuAJukrsjA0U9ALW218R68px8/pB5AJpjgmCLmNokcV0wjum0jVFZGloMTgTS8i0c79osFdhoXHjjKzwYFMzhTa0CJAxtKQZO0CJ2xQsGztj4wRdRC5ilzE2gaPGbwBf+lXR0CILsj4/pB19W97H9EwRmRpaeBOLaNqp8UfcuPGftf8wm72yD9rQIkDG0JJm7O5LpRLR/FLJXblLx27DBV1DQwMLfOlXRUOLLMjcz8CMVamaIjI1tAgmFsG0w63yhHHDuJkFm5+0oUWEhKElzdidmi68M7Kx04KuoaGBAt0+NQba0JIadOzikLcoqwDhH0xDQwMHEoKeZPbIDNrQIokimz1+ikLGLm9RVgHCP5iGhgYOFO/HTTBvaENLcVGQ2OUtyipQJl+QGClhEzhq/AboB2wNjbyRtyirQIJuIbMoK2JdOW5eli8XYG1b09DQyBx5i7IKJOgW0uewItaV4+b5cHIBVmEZDQ2NzJG3KKtAZFLeqvgbJtmuhJjyVHjFf+4/Sqwib7ULJVKjxXmHt4eOM/E6ChwK9avUdltQXkGBo8vQl3A4mj52oOgBRVTr/9u3rDO6v0KV49FdhW1s4uMoII2dMlewI2b61Tha7Mpspww9rrDIZVP6NSuOWtA1NPJG3qKsAoHIkbfvlWCrw+ohrNKxcbXjKkhcA/qtVaGJScNtYsJhSKbLEflvbBgX8/ntxGlyJyVFgefFShCTT4Pwt7AEvQSo0xJHJSh6UCp9GCE8WNafv5Z1rsyxDBJHn1T4OH7FCwVuzYqI2gXjaLErQ72OgVuvowJFSSqVSKHQQnNUFHTrj9r/XwSsq6GlKL+nCgoSu7xFWQUCke1Oe9urNrll35WhokVE69AalzTM+gmrCXBi1snBO13I8nh0oDBCE2bMR6dFDt64k7KCzvFySw8fR8tkYwk6/fFMu8TyZQk57FpQnnAPpmI1jkTt+lusygMfRxFpVZscO6PkccTYQYPpT5ZQiSdoFJuj4ra1XARdG1okobccrnIsjQ3etgZy4M+TNAu9bXcidJEEvWuWvPJMNZOuW+thyRux/gFj55ZWHm7CUv1RucAR/MF4MUEv01LSIjAFvcq6xCYI+o11Rv58nZEluhrHtl3xaAhxFJBWP4hhQj8IOo4Wux5r3EF7eWQp6NlxVCwsk4uga0OLJHRRoFWOpbHhhWXEefKTtQoSgNSz3lz4RfVP6WCVG2EYsRwmtPGdOpPm2GldjGDSpFC6DhmvCn0Bu5UaxxL9ecLaAxFB3zkNpSAM6CFwAz/+kB9KHC/tjvBYkpWg/6BjI1rsWG3JMmNKCF5uh70PgCJz5O9HCUMLjqDHGwYcHxeFNLQY1Rpn1HmH8qHRfl3Vk5JXO3T51/p7wJ9L2dCCI+gZxk7C0IISOy5GAqqn956UKMYub1FWQYSMIARHnU60yRNOjF7MLutaDx8+zFKtVl+chP6mEWsIdA1lpR/J1Ti+mgdzqoqgC7xiMu6obXxZxt01xZXCk8gfa/l685/xYD2ocXyy20fHnX7bU/WsBJ08dA0fr+hDV+I4Wuzu7Eqv1+/QeLE5pPLd9YmB7DjyzVkkDC0ogp5gGCCjgENnUkxDi2DUcRt8hG/G+6AY+O5s9rBvLbmTKRtaUAQ9w9jJGFowYncvFGznUAODNRV0xditq6BfPh3bxzFLIJwYdc2PRZ3EqE4V/ZTcXaXI8nVM36G35nQivWhOr/nGjtKCHuJ1GZNxxxT0HZZxJ4JeqnUXkSwEEfQbyzqngq7CsW3fdfrk/uq7ZDIUdKJl0zixC8bRYrdNZv8Ki12FiHu7E8pLAIrMkW/OImFowRD0JMOA49Af106rmIaWFxO6eJxSFWctOLsRDT1ddE/dV2M31oGxtP5xJ1M2tGAIeraxW93QghE7LkYi6MKJCrpa7NZW0LfIg2U/0t4JLR1t1k+hV475YsAKvVH7iK7QL+aOM5lPxo63ce1t2vTaB8mLQojXU0zGHVPQX1jG3biH55VqnfLlQAT9wRd0BY4Vu79N2xM/eZ8zE/TbBLHjxpFid2cPenfuCv0IJpJyP7pqLTJH7n6UMbRgCHqSYYCJwtBxhsU0tJRYoqwL6zh2T1YX4SU653WBVtpLa4+og0zHLglDC4agZxk7GUMLRuwS/UgN00u5q8VubQWdXIufAzutLEqJGeFqkEapsfsrYoozptfzw/GQZYjenHnz2nEc7/2PQspd4DWIybhjCnrD5J9Taq68+6Ap9wOWclfhWGGMel4/6UxT7q2EdLQ3jhS7Hn32Kvc5ar3o41iROYbux1UNLRiCnmQYGNFlXwsko5iGFja70MTsM3usLoWfrrnvnVv7yz3rYPcPCANFyoYWDEHPMnYyhhaM2HGfRVALMhV0pdgZayzo9IE5EibUGL3D53C8RDSduQEX6fXw0Rk3J84FG1aaQwJesRl3REH3Mu4u3sP3LWeKU+Loxu3TM6b8IlMcWuxcTj2uLfhn1IhTZI5fCfoPDC0Ygp5kGGC4pg8sRTS0lCDLDlpeoxNO9eU9fsLxxOLGsqy9/2bs9gSkbGjBEPRMYydhaMGIHfscFXRmh2N/qsTOWGtBN/rRSxHrHToNzgcs1L8UdLJAh8uz1YLrFVJG7hJIbbeFzys2444o6F7G/Znjy4NtWzNg25oSxzZTuTsv9ZDttjUjYUsXG8eKHS/oxyG+AYrMMVHQf2ZoQRL0WMMAxQVkbY1CGlq69D3sycIV9BfTvP9S0I3d5fmZtX/un0jZ0IIk6BnGbnVDC0bsAHGC/kJnViboKrEz1lTQyzQ2T6m90joxF/f0HfopxOu5So9Zcvp8tv/qfX3YmjsT78M1Wf3cMue7vDkzxCs2444o6F7G/cWs71CO9FmTHDTYwp0Vlvln/XW/L8txy4Z1zlHHmzqyLSzz5hyyj9P5ZBg3jhK7tn1cphy3IIL9S3r8Gf5WkTkmCvrPDC1Igh5rGKBwH2GKaGjZWZhmY9HouoJe+1bQYfPTv/Plq3ciZUMLkqBnFzsJQwtG7ABxgt6lE60r6AqxM9ZI0I8qlUo
HNiAcwa01gA/pbemCnQglthOBxGsBxw125sHythy8HTqO8+ZfelfO5HY+dz9KXochXvEZdzxB9zPuVY6vQffNuNekW/rV22QhzRF2kVTYLhI+jl/xQoG7U6bpf2rGjaPE7pKsAYAjmKLLHF8RReb4Rcr9J4YWtJR7jGGAYuLmH4poaGG7ZBtw39GU+8nXKXfykD1bnlmW5W1oTtnQgpZyzyh2MoYWjNgB4gSdnXQFXSF2xhoJ+rZX2oHE6BNei3SOYy5ErFoBH1ArgLlSdlhzFvdlc7BCv56Pbi+4v+xxHuSKJK/DEK/4jDueoL/4JjihrkWwQjeMA2jOEmyalOVYvoM6D/QRmY+jiDQal7SgloWnaf7qNTSOEju3DkmPPoKVe0FdCwFF5vilKe57Q0uapjiAl7UtqqEFJvyF8SNTnPHHOnu9sfaW+9YuO5GyoSVNUxwAO3YyhhaM2AHiBJ01zmiQpaFa7Iw1EvSfoZDV/FbEunIscmvRn6LIHL8U9O8NLXjb1qKGAe5cYQ0tkCeDR2r3MTuybQ3gicXBAQjDq/HgLfNSNrRgTBpZxk7G0IIRO0CMoN8HrS3VYmdsoKAXsN7+ilhXjiKvXJBx7HKBNMcEQf+poQWvsEzUMACYOEHR0AIaWqonC/ZajxWW+TDrbJxLl4licUZWeLCpmSJlQwvGpJFl7GQMLRixA3AxEmIXvEOXj52xcYIuIhexyxibwFHjN4Av/SphaMEs/RoxDARZW4rCGVrgFZ/5wWZ/t/SrKwu+oeW+VCotoM2xu3L/z9p/mM1e2YeUDS0ok0aGsZMxtGDELhSjmlgGNhB06dhtuKBraGhggS/9KmFowWnOkmQYCHzS7FPBDC3Pi1LNry59As1ZPCXwV3mnXtbWO3MzCzY/pWxowVkFZBc7GUMLRuxCMUpcoUvHTgu6hoYGCnT71BhshqEll7Sejl0c8hZlFSD8g2loaOBAQtB5w0Augq4NLZJI5pUZdOzikLcoqwDhH0xDQwMHivdjLoKeMdbV0FKU31MFBYld3qKsAmXyBYmREjaBo8ZvgH7A1tDIG3mLsgok6BYyi7Ii1pVjkbd0/RRF5qgFXUMjb+QtyiqQoFtIn8OKWFeORS668lMUmaMWdA2NvJG3KKtAZFLeqvjbK8rbR17TWxF4xX/uP0q0+E9Q5GfHEDBsjbytlM1D57DFn1PbbcF2vpbbqe62EHhVT0pmXBtfwhAAHYtVOR7dVVjJpqN2x7a/3+WEgyvYKRNTmioYR4tdKF4+XwFF5qgFXUMjb+QtyioQiBzx+yVpTb80Bb1KBM5sQIscVoa3VA9KATFMqdRRQb+Yz28nQZkEQ1oUtojUVYgswB5RWg9hkF49BJ7XvbC/lcPImYwI3pQ5lmFTL31S2bLtDnCMKnp6jUsiaheMo8VOjFfAV0SROSoKOn8RrqtBThtaiouCxC5vUVaBQGS70972qxQeQ/2mFAW92jDrJ6wmAJTfNaBlwon4NzXn4+aIzZKPTstoOm/cSVlBt+/K8LQyoMfQsPjz254X8oJOf1Bep4vuaVxPAWPky4AaR6J2/S1WaKtNH1faMWU60motOqYZhoRxxNhx8eL4iigyR8Vta7kIuja0SKLIZo+fopCxy1uUVSAQgenRF/SBnW7KvWuWvBI/TPiqXqdRH1DmyNU7qDzcdMZk5lQucAR/bNsdA2SdNhP4tueFmqAzXkDtG0FX49i2K768UT6Mowh8sWMVqIaRfhDcOFrshHjxfAUUmaNiYZlcBF0bWiRRZLPHT1HI2OUtyiqIkPEF3baNNAV9x1z48l2iYyd+p1Eert5NnUlz7LQuRjBpUihdh5909crYle3ONz0vZDmGeSUJ+kWTdjJU4ngZYbFNIygCX+ySOjZy42ix4+MV5euhyBz5+1HR0CIt6NPxXOgJ6yMFU0SGhhaxf/E7lH6N9lrjjC7Lv9bfA/6cNrSIkDC0yMeuG8RO7EMdgPXgpklexdjlLcoqiJDxBJ3NHukJ+ovZZV3r/TORjDuFt4B9dBxnfDUP5lQVQT/qdGirhDu70uv1O9+9h1V7sAx4JQg69cTBzabC8cluHx13+u1A5WK6E6cwh5D4DB+vyAr1NnkcLXZcvKJ8PRSZI9+cRdHQIivoLceZcw0+AqRgisjS0FIlEl4i/3Xhg9ucJazonNFldzZ72LeW3EltaBEgY2jBiJ0QRw4nCxbTF+XYraegP9H0ZnqC3jU/FnUSl7qn6DuRjDuFn5G+aE6vg44CCoJ++XRsH7tvzbfJnVaJvEFHFXSOV4Kgj27Jmoj2PlLg2LbvOn1yf/V9havQeVJEKmJHtGwaJ3bBOFrsgnhF+XooMke+OYuioUVW0Mf0mWQs9PIApGOKyM7Q8mJCF49TquKsfWo3ktHljC431oGxtP5xJ7WhRYCMoUU2didUJt7pbiE+jjxOaDe2GhiQ1WK3poLepj0N0xP0klk/hV458ERF8RKbcedeMTcPnbdp028fJC0K5JHZ7rPk0J096N2lvELneMUK+hTS7cNDtrST51ix+9u0JbGn4ttxOelUxO42Qey4caTYcfGK8PVRZI7c/ahqaJFOudP7renMQ8NpmCKyNLSUWKKsCxtl2T1ZXYRFgTO6QCvtpbVH1EGmY5c2tLjAiV3N/IAfdQgNH0cBdMvzqblQjd2aCvqA/kxT0GmMan7qpBGbcQ8E/c2ZN68dx/He/yik3MufA3o19uhjczm1OYSC4xUr6Ay39JZT4Fhh91fPS1kSqfuMfiuldHQrIR3tjSPFjo9XmG+AInMM3Y8qhhY1U1zTcUIjaZgiGDIytLCbrwZC8MyWESV/NcHB/d65tb/csw52/4AwUGhDiwAZQ4ts7FhT1Cq1XfFxjOIdHtWUYmesqaBDx/pKZWB3IpMI6v31HpjkYjPuvqCPnOvhozNuTpwLdkJpDinTTJHLtRc1c+AJOs/rC0G/gnSYCkeXi/finOh5NO+Q0xyCFjs+XiG+HIrMMUHQZQwtaoJ+FUnbpmKKAGRkaClBlh20vEZvxOrLe7woeDfpjWVZe//NrBvvhDa0iJAwtEhPpB9mqVarL2BpxMcxgvvFApIuKrEz1lPQmSWHIvwNrHfoNCAfbKGemHH3Bb3VguMhuSjdJZDabgt3lZe+oPO84gT9mt18b/BaS4Vjm5G48xausXqeWpbPSMjysXGs2PHxEvnyKDLHBEGXMbSoCXrk4SUlU0R2hpYuffd6snAF/cU0778UdGN3eX5m7Z/7J7ShJYTVDS3yE+lp3TRLNKnOx1HEzsuz+cxqjSrEzlhPQXeRXsr9xIRnqfuFd/twmenz2f6r//0R58i8JqsfeAdEIWvOpNfck22XqVOlHL+YxRN0/k0CJ+g+x0fnkCzppnOPlizHLRvWOYQLvFke2IOj2G+l5cN5cw7Zx+l8MowbR4kdHy+er4gic0wQdBlDi5KgN5lJk0cqpogMDS07C9NsLBpdV9Br3wo6bH76d7589U5oQ4sICUOLwgq9UftgK3Q+jiJOTNOsu6PysTPWSNCPKpVKxx5UKr4apFhYBn
YflPzdB3xm+sFytxxMR6PRHCqjuo+TV87kdu7NNJLX4RPw67AF7GXH7lQqNjXZikATdJ/XfalUWpiNUon5cHyOw4njjEaO/0ZLmiPsIqmwXSRkfd6HFyaVbx5UUODuiGn6n5px4yixE+LF8RVRZI4Jgi5jaFER9OD5MkAapghAZoYWtku2wYxVp9WXk69T7sY/a7Y8syzL29CsDS0CZAwt8qY4amKkpjg+jmFU3xss46sQO2ONBH3by7JvByPplX79gDoAcR53f/XadFz4xVHnQa5I8jr8hA2THbfOA6tr0Uvt1Q/H69Tr08KuwyALMbwlkjAJ7kBZjuU74EIfkXteHL95lYCD1oSrQ+KvXkPjKLET4sXxFVFkjgmCLmNoURB0ouetyGAapgiG7Awtrgv6J6Y444919npj7S33rV12QhtaBMgYWjAMjQwsjlFU6dY2ldgZayToP0Mhq/mtiHXluHnVJnMBVvtUFUOLvKDfOjF6noopwkVmhhaoSgKa4D5mR7atATxBPzgAYXg1Hrxlnja0CJAxtKAJuhvHGNBVvErsjA0U9ALW218R68px8/pB5AJpjknb1iiySblPJ7xjKl1TRMaGlurJgjV0ZIVlPsw6G79fNIIdNrxz9Yys8GBTM4U2tAiQMbTIGxqfq9RwxfxIfhwNLnZV+uPFf3UrGztj4wRdRC5ilzE2gaPGbwBf+lXR0CIr6GR9fgj9fEct72N6pohMDS3wio9WEzP80q+udNe8o5DRxfjP2n+YzV7ZB21oESBjaFEwNC7AcEVVnI8jF7sXiNvC9D3I0rHbcEHX0NDAAl/6VdHQoiDoDMzWkaopIlNDy/OiVNvxRk+gOYu3FPdXeSGji2HczILNT9rQIkLC0CIdO9Z4pUajJMQxiN07EXxz8RykV2RjpwVdQ0MDBbp9agy0oSU16NjFIW9RVgHCP5iGhgYOJAQ9yeyRmaBrQ4skimz2+CkKGbu8RVkFCP9gGhoaOFC8H3MR9IyhDS3FRUFil7coq0CZfEFipIRN4KjxG6AfsDU08kbeoqwCCbqFzKKsiHXluHlZvlyAtW1NQ0Mjc+QtyiqQoFtIn8OKWFeOm+fDyQVYhWU0NDQyR96irAKRSXmr4m+TqbDNFmmVfjV9sC0I9x+laPGf172/lrX/AIfNQ+dQqF+lttuCbQot9+jOi+iX0uD4DjtloqWpuNKvqhyP7ipscwzPUUAaO2WuYEfM9KtxtNiV2368guJp4f5cReaoBV1DI2/kLcoqEIgc8fteK1D0oVKJFPNDE7sSoM6KBVRhE2Ej6EvG8LpvWX/If3uGcTGf3064xmvSorDVIQ8pRBZgz2QZ6iEM0quHIHB0a1mEFZ02Z5nQ5iyKHMvwCEZVnOf4FS8UuDUrImoXjKPFjo8XK29OWEYKkRSZozbFfQttaCkuChK7vEVZBQKR7U57uxIIeqSOHwOW2NEfz7SyT7Vh1k+C6oseDqy/r1Dyx9o1Hp2W0YSe4T5kBd2+K0O1B9CBJ3tQphU8IqUZ8TmyapPdyCX66EDxhyaoghpHonb9rXKE41e8MMCqSo6dUfI4Yuyg4fInXZS7Zc2Pv6sUioHsOOptazHQhpbUoGMXh7xFWQUCEZCDbAW9ykrvEp2Lyjn0GP0HP/5aS1p5uAnL2EflAkfwx7bdMYAiVYN2Wv0geI6sH0Q10g9ixHokjJ1bRY5tu+IX2eI4ikirH8QwoR8EHUeLXY81f6C9PJigl2kZcBFF5qgLy8RAG1pSg45dHPIWZRVEyPCCfrkdfj8JQGqgQ3+eUJnbMRdxem4s6aT0as1ejakzaY6d1sXIbxyudB1+0ma+aTd44jkmdWwcsTwttCpW4nhpd0LSxjiKyKVjI1rsWO3TMmVaoeaOrTXjyN+PioYWaUEftkaO8PLAG8cze/jI0NBiVFn50MgxD258+df6e8Cf04YWERKGFvnY1bh4JZiR8GKXtyirIEKmIs4hlWijIMz2xCzj/mJ2Wdf68F/1z/rz8PB3BrF5dBxnfDUP5lQVQT/qQHcgQvETPh2n24KZcSyZp9WXd76lL8OINT26htLZKhyf7PbRcaff9lXd5SgCfw4hDyTDxyv6QJI4jha7O7vS6/U7weQYk3EvNEe+OYuioUW62xot5B4VdDyzR4AsDS1VIgMl8l83dMyDG9+dzR72rSV3UhtaBMgYWjBil2RGwovd2gp6pdfuRNZ9qGK342XcPxZ1Eot6RNHJo5b1hzWpv2hOr/nGjtKCfvl0bB/TJ8k2fYdO7rg0Bd3lSAT9hVyFEUEf03forTkVCwWObfuu0yf3V78scvyKFwaIqBEtm8aJXTCOFrttMjNWAl6XMRn3QnPkm7MoGlpkBb05HzdHMYKOZvbgkKWh5YROL+90xwl/zIMbv7EOjCV75edCG1oEyBhaZGP3YkIHllOq4klmJLTYraugH8G8We5HnywRxY69WCZiVz+l7W5D6WiyQt9/+MdW6DRX9DZteu2D5EVhizw791kyjDw8DzqD1N6hU/gcT2txgn4xd5zJfDJ2vFymJMeK3d+mLYlpvAKOIlIRu9sEsePGkWJ3Zw96d9wK/Skm415ojtz9qGpokRV0IBAn6GhmDx4ZGlpq5gf8qEMnNf6YBzcOrbSXsL/mRqZjlza0uEB6d8maonZh8kwyI6HFbl0FnaEXnTIRxa7BAlVisaiFU2AP1hn58/UvzZ68OfPmteM43vsfhZR7+XPArkaWpB5EZ8s0OJ5WX06iKXdjej0/HA9ZFkyBY4Ux6nkpS5+jgJTS0a2EdLQ3jhQ7djGW+368BjEZ90JzDN2PKoYWFVNcvKDjmD2iyMbQckoPqtSuwx/z4MbPrf3lnnWw+weEgUIbWgTIGFrkBZ0+edHJM8mMhBU7Y70F/TNqxMETOzcb7cXrPZxG+cPegzxYD3AxXg8fnXFz4lywk0pzSJmzb3z/8IzBMek6ZGg6c0ONoxs3Ll7lqEUlnzkELXbhOT82415ojsmCvrKhBV/QccweEWRlaPkwS7VafXESPubBjd9YlrX338y68c5pQ4sICUOLvKC/w49nEPQkMxJW7Iw1FXT3xrqLLvPwxM7NRrNUCkTkQ/ybeEFvteC6HJKL0l0Cqe22CFZ5ZIEezU3jc3R/RjJFFGSBDregCsd2NF797zIPGGDZPCMhy8fGsWIXFvTYjHuhOSYL+sqGFmxBxzJ7iMjS0HJaN83STvSYBze+uzw/s/bP/VPa0BLC6oYW2dh16Tv0k4Ur6LFmJKzYGWsq6E92/5K+k/0MfwNP7NxstHFiLu7pO3T2Sut+0WCpsD3r7NUwzmeWuwXhmqx+btlCQd6cSS+6J+/iK5MZJGpXSYEj83J8mHU2fD7bf/W+MiSz5MT7IMtxy4Y1AInXdoRjIi8MML/Nm3PIPk7nk2HcOErs2vZxmXJ0VwKxGfdCc0wSdAlDC7agY5k9RGRoaPkwG7UPf4UeHPMQxpd/rX/ny1fvnDa0iJAwtMjGbmdhmo1Fo+sKeqwZCSt2xhoJ+lGlUunABpkjd1dCJb1dJEaQjQbVM0slf
xcJiRdT9t2ZNfvzx7L23f/hypnczufulSh5HT4BPzJ30OvwjjxjUhNqGClwdHdbuDacB8vbVvF26DjOm397SXPk4iVw/IIXCtwdMU3/UzNuHCV2ZM7vAEf3CSw+415ojkmCzrCSoQVb0LHMHmFkZWipMTOVa4oLjnkI4/+s2fLMsrzVhDa0iJAxtEhPpGxXcwPikmRGwoqdsUaCvu2VB4AYpV3nIchGkzB8QE0A7+2yv0I3dllzllfv/3icB7kiyevwE15Hdo7d9wqdSi/OapQGR+ME6iF4M0iwQr+ej24vuL9MlmMZnk4GdEkgcBSQRi2LFtSs8ATAX72GxlFi59bo6LkyHp9xLzTHrwV9JUMLuqBTqJs9osjG0MIbq/hjHvz4H+vs9cbaW+5bbNusNrSIkDG0KLmLjVNzYSSbkbBiZ6yRoP8MhazmtyLWlePmVZvMBVjtU1UMLakIOoLZIwaZGFpWFfSDAxCGV+PBW+ZpQ4sAGUOL0kRKFugQriQzElbsjA0U9ALW218R68px8/pB5AJpjgmCLmNoQRJ0Lh2BY/bgkaWhpWs+V6lR50Q8NriUYGjcOCMrPNjUTKENLQJkDC0KE2n1ZMEac4bMSOixMzZO0EXkInYZYxM4avwG8KVfFQ0t0qVfR6MR0e3RiK3yAsMAltmDQ5aGlp2FuQCjTiN0bHCmndC48Z+1/zCbvbIP2tAiQMbQIm9orJum+cGUWzQj4cduwwVdQ0MDC3zpV0VDi3TpV8cFm/2DFTqW2YNDpoaWHda8oxo+5k074rhh3MyCzU/a0CJCwtAiHbvnRanmbzIUzEj4sdOCrqGhgQLdPjUG2tCSGnTs4pC3KKsA4R9MQ0MDBxKCnmT2yEzQtaFFEkU2e/wUhYxd3qKsAoR/MA0NDRwo3o+5CHrG0IaW4qIgsctblFWgTL4gMVLCJnDU+A3QD9gaGnkjb1FWgQTdQmZRVsS6cty8LF8uwNq2pqGhkTnyFmUVSNAtpM9hRawrx83z4eQCrMIyGhoamSNvUVaByKS8VbG5ikZHd5XUCjeZPnagyg9FWActH7vQUeCwxZ+UFYXy9pHXzNcwPge2fRxtbJgGR+MddlvE9Fq7gh0ldA+JKkc/XgJHHmnslAl+/6RxvNi1g91b/LGAInPUgq6hkTfyFmUVCESO/H2vgDJsoByk1eIW2rEQ1GkhgJLZgA+h7qlE0P8A/lr7xsV8fjsRClLKigKtVeiKXc+2K0QWVmkzLc3RrYcQUXS35sPUUOXIxYvn+AUvFAS/f9I4WuxofZUBq9fBH4soMkct6N9iXQ0tRfk9VVCQ2OUtyioQiGx32tt+PwgyY/a30iv+Y5r0xzOt1FcKtz1isCz648w6MB6dltF03riTsoJ+DDWbmNhd2lBLs/2t8GFwZBULu5FLlFVlHDsjQ5EjHy+Oo4i0qk3S3z9pHC12W7TB9Cftc8EfiygyR8Vta7lAG1okkcwrM+jYxSFvUVaBQATkwBd0onNxco4rdlXWXfRLQX+1rFdaebjpjMnMqVrgaGD76WhW3ajciS7R8TmyngLVSE8B1jdhCH0T1Djy8eI4fsULA9zvnzSOFrsea3ZB+1zwxyKKzFGxsEwu0IYWSSTzygw6dnHIW5RVECHjCfql3YnXc6zmR/TnCWudQwR953THCOMP3Up7YJ0ZxtSZNMdO62IEkyaF7HVo24Yndsc2Lc5YYT944HNM6vrHdTZU4ijEi+MoIpeOjWixY4zKlCl/LKLIHPn7UcLQgoMsTRHZGVqMao2WBmXHJyUzbhHBfWf51/p7wJ9L2dCCIl4X40PHmbQi48NbGKdd0vM0tKDELtGMFIwrxi5vUVZBhEzQ4Kl9dNzpt6OqjtnilmXcXVNcKSZIBsu4w2rIccZX82BOVRIF9ifhWn76DDoABsDnSJ5Zqi/v0Y6NI6c5fLwiK7xbNY58vHiOX/HCAP/7J41jxc64syu9Xr+zFT4WUGSOfHMWCUMLCjI0RWRpaKmS6b5E/uuS43s630QFnfvO7mz2sG8tuZMpG1owBL01ZzEKPXhC11tnRP4b52xowYhdohkpGFeM3XoKetu+6/RJjPrfrICUxG6HZdyJ2pVq3YW5qBpR7ELGneCiOb0OOgooNHiCVK0v6E9kBklX0Hf8twov5GqLE3SiBVMmFgoc+XjxHL/ihQHh908aR4od4URm/Mpl9JhHkTnyzVkkDC0YyNIUkaWh5cWELh6ndMY/XXRP417zcd+5IcuIpfWPO5myoQVF0Gk3vNugx62LRwe6tDThaSxXQwtG7JLMSNy4WuzWVNArdn+btrtNKR1N4TarN+4h3V6th9PRFDeQcQc0D523adNv3yjdxhcem31B76Uu6C/+W4VagqDfBmIhz5GPF8/xK14YEH//pHGc2JFV+aB356/Qg2MBRebI3Y8yhhYMZGmKyNLQUmLJwC7cgEm+He470Ep7ae2R+UemY5eEoQUn5Q5/NJ15aHjk0CT7mFyouRpaMGKXZEbixtVit7aCTmPUi6ZREMWu4XWjp6i58i5i32JvQ96cefPacRzv/Y/kdTig/LiU+1bKKfeG91bhtPpykpByb7npXAWOfLx4jl/xwoDw+yeNI8Wux+Z8dw4JjkUUmWPoflzV0IKBLE0RmRpamID7N2C8oPvfObf2l3vWwe4fEAaKlA0teKa4K/roxWPEcuxwoeZqaMGIXZIZiRtXip2xtoJOf35GL0Q8sfMy7i7e465pL+M+cq6Hj864OXHcnsyS16FtVwgGdqeS/hwC8Dj+wBSnxJGPF8/xK14Y+MH8jxY7lyN9AOOPRRSZY4Kg/9TQgoEsTRFZGlpKkJGF+/BLQQ++c2NZ1t5/M+vGO5myoQVN0Kfzedj/MIK3JXC95mxowYhdkhmJH1eJnbGmgt5mN9YdW/jxwBM7L+Pu3mQfZriyjBFk3FstuC6HZEJxl0By1yGzGlF4WT4jtSwfwOPo/kzYtmbQDKcKRy5eAseveGGA//2TxpFil5ugZ8cxQdB/amjBQJamiCwNLV36HvZk8ZWgC9/ZXZ6fWfvn/smUDS04gn7xeO1cX4RHx/Qdemuet6EFI3ZJZiRhXCF2xpoK+pbdOaLvZFN6LQLwMu4vZn3HMO4X7DmMHDSChbuXcQdck9XPLXvYVLgOAbwP587uR87jc2SejQ+zzobPZ/uv9IB5jd6cQ/f7shyj8cqysIz/+0/nk2HcOErs2vZxmXLcEo9FFJljgqD/1NCCgSxNEVkaWnYWptlYNLpfCbr4neVf69/58tU7mbKhBUfQW2T9fXgbHr2YO85kPhnnbWjBiF2SGUkcl4+dsUaCflSpVDr2oFKBFSvsRKiktxPB4DLudFdCyd2VQPcf+Peal3GnuHImt/O5eyUiCLq3UybyzJICR29XhcvswfK2Vbi7gbyNJNIcI/HKtvRr0//UjBtHid1lx+4Ax0HoWESROSYK+s8MLRjI0hSRqaHl/nlR75Kpxr0DY4tZ8d/5Z82WZ5blLyhSNrRgpdyHVxPBxE4xvZ4fjodsu1qOhhaM2CWZkYRxhdgZayTo
216mFuJSvoNaAZElAaLYvfgmOKFugLBC9z3uFI/zIFeEIejGFtSyiOp5GhyNE6h74E0i/gqdPFRDvY5gY6gsx0i8smrOIvz+/uoVjRePS1a/ohw+FlBkjomC/jNDCwayNEVkbWiBrU8L9yihOqX/nT/W2euNtbfct3bZcMqGFjxT3NBxIkl3AHO/52lowYjdD0xxSrEz1kjQf4ZCVvNbEevKcfMqheYCrPapqxpaMJChKSJzQwvkAr1lQ6Kgu985OABheDUevGVeyoYWPEE3DoXCMR7IAh2SK3kaWjBil2RG4sdVYmdsoKAXsN7+ilhXjpvXyyMXSHNMEPSfGlowkKUpgiEzQ0v1ZEEbH1Jwgs6nBIXvnJEVHmxqpkjZ0IIxaQxpqB4dh4WMSyUZw9Y8qDeTl6EFI3YhM5Ifu9C4dOyMjRN0EbmIXcbYBI4avwF86VcJQwsKMjRFMGRkaPmom6b5QWf/+1KptICWzWyVF5h2uO8A/rP2H2azV/YhZUMLxqTx6ExGo7n/ejyI3RsJHK0ix5CXoQUjdiEzUo0/4sblY7fhgq6hoYEFvvSrhKEFBxmaIigyMrQ8L0o1twHUqemCzf7BCp37DsXNLNj8lLKhBUPQr0ZEt+cxsbuej2759+o5GVowYhcyI3HZFWFcPnZa0DU0NFCg26fGYDMMLbmk9XTs4pC3KKsA4R9MQ0MDBxKCXlzDwE+xGYaWXARdxy4OeYuyChD+wTQ0NHCg78dvsa6GlqL8niooSOzyFmUVKJMvSIyUsAkcNX4DtKBraOSNvEVZBRJ0C5lFWRHrynHzMrS5AGvbmoaGRubIW5RVIEG3kD6HFbGuHDfPQ5ULsArLaGhoZI68RVkFIpPyVsXbCBoUOAr30MHZiWD62IGqASUzrnAT7MIgGBm0o8Bhiz8nKwrl7SN/FwnHVwRegaP7j5JfnIo/9jFsTqEqIwbHo7sKq9VUYZHLpvTrFexmCjdsFMbxYsd2x7DjXnAsoMgctaBraOSNvEVZBQKRI3/fq1eCuNKPFgvAEvQSoA7lf+65PaECRlAoYTR6g35B89uJUNFQVhRofUIm6DxfEViCXiUPKmbjJHLMgXa7oIKuyLEMMj6gtZoqUJCkUokUCk2xDklE7YJxtNiVoV7HgNXr4I9FFJmjFvRvsa6GFv73tP7k93ukiYLELm9RVoFAZLvT3q74gk5/HKdVzc806Y9n6C56uuiexpZWHvlT5KPTIqrHdxGSFXTKiAk6z1cEkqBXG2b9pBo95nENdZ2ooKtxJArX33IrO8STMtKrFDqmGYaEcbTYbdEG0580afRkD8q0+so3pb8xkB1HvW0tBpthaMlF0HXs4pC3KKtAIAJyIAp62bYj1X8wBb1Ku4uCzH0j6FB5uOmMycypWuBoYPspd56vCCRB75olX8L5Yx4Tx0u5q3Fs2xU/VBkKOuvZMUzo5UHH0WLXY81JaJfGCnvSbH/XDwID2XHUhWVisBmGllwEXccuDnmLsgoiZPyOjfQF7BZrgSQAR+xK9OeJ3100SdAvmrRk4dSZNMdO62IEkyaF7HVIn1SCSoxpCvqOufA1nD8W4DiGK+hKHC/tTvDoRUhdboe9D4Bcum2ixc59DKNMf9rgCQPZceTvRwlDCw6yNEVkaGjhezTfdxdCmVAPnJln+df6e8CfS9nQgiLo0/FcKM/r43cYWlBiZ7xDiddQrzUGz6SkGLu8RVkFETKiwMVk3FFb3NKMO0WCoFNPHFyIj+RgfDUP5lQlUchG0F/M7v3zot6tho55uHY4+qcKxye7fXTc6bfLjBSdQiLZ6BTmf/LMNXy8Yk6ApHGs2Bl3dqXX63fgmqxAby64QDMQ9Ow48s1ZJAwtKMjQFJGloaVKZKBE/usasI4wF3wjDw+cmWd3NnvYt5bcyZQNLRiC3nKcOd9Ax8cvMbRgxM5rwhJVdN+kpBi7NRb0y5iMO6ag79CMO0WCoI9uyTMn7Qt00ZxeBx0FpK/DJ5q2zUbQu+bHok6uw3pVPObxSF+8MkFX4di27zp9ck/12XuESq/d4dfssbwwQESNaNk0TuyCcaTYkbiRhUCFrlLb9B36VicjQc+II9+cRcLQgoEsTRFZGlpeTOjicUqVoGu+GPDnsyGCM/PcWAfG0vrHnUzZ0IIh6GP6PDlmnes5/BZDC0bsWJvUbjQbH5iU1GK3zoL+FJNxxxT0l+CWihX0KaTbh4ds2dM8dN6mTb8FoHQbX5gUsxH0klk/hX5AMH3wxzzGtNehK+gKHCt2f5u2JIYWT0egeeW+HWnPlYrY3SaIHTeOEzuyQh/07tgK/bJDVkCdQSbv0LPjyN2PMoYWDGRpisjS0FJiycAuzcnSuebUXIT+Js7MA620l9YeUQeZjl0ShhaUlDtdeTedeWj4txhaMGLHNKO6iCzRA5OSWuzWWdAHMRl3TEFv+Bn3eEFnuKWX45szb147fq9f2etwQPllJegf8KMGqSL+mAdLf7mCrsCxwu6vHpf26kUfx1JKR7cS0tHeOFLsGKEynUMM9oJhEA1fkTmG7sdVDS0YyNIUkaWhxZ1gakEliFN3p40I93vn1v5yzzrY/QPCQJGyoQXPFNekthwev8XQghG7Z7YkKoVXRpxJSSl2xhoLemzGHVHQuYz7V4J+BWo3cq6Hj864OXHcvr6S1yF7HTmwO+4kksEc8g6f+WMejgM77SfOXJGjy+OTM998Ro04ucz/aLGLzhvbdifyrSJzTBB0hu8NLRjI0hSRpaGlBJla0ANf0CP3ovs9NhHdWJa199/MuvFOpGxowRP0q8jrkt9iaMGIHYlP9eWdfzBj4E1KKrEz1ljQYzPuiILOZdxjBf2aTSpv8Mqn1YLrckguSncJJHcdHgWGYTaQ7jt0etF9wOKcP+YwdXyocXTTXXewUD/mjr/ihQGWzTMSMrRsHCl2UUEnC/Sos7bIHL8S9B8YWjCQpSkiW0MLvIc9WQRCEFnkuaPuRLS7PD+z9s/9EykbWvAEPfLg+WsMLRixI/F5Mc37iKALJiWF2BlrLOixGXdEQecy7rygn8/2X+nBo3NIljvTOTg6ANdk9XPrfVDaPpnNHHJiLu7pe/NT8djgODI0/YdqWY5bdueIvkPfhnmyf0mPP7/mhQHmt3lzDtnH6XwyjBtHiV3bPi5TXuyyLJMZJGL7LjbHrwT9B4YWDGRpisjS0LKzMM3GotH1heA0fiNpMBEt/1r/zpev3omUDS1ogt5kJmIev8XQghE7Ep9anKCLJiX52BlrJOhHlUqlA5ssWHooPuOOJ+h+xv2+VCotzEapxHwOD5a75WA4gYS047/tuXImt3PvakUQ9BBfDkgcYbdFyd1twR9zHBkCQZfmCDtHKtwukkq2ZVGb/qdm3DhK7Mi80QFeVMXvyPrAvvv2lRAKMuP4laD/wNCCgSxNEVkaWgyWkG14ek2m/ve438kX9H/WbHlmWZa3oTllQwuWoAfrnwC/xdCCETuacj+JptwFk5JC7Iw1EvRtLxvN4hKfcccTdD/jfur1aQmvXoe3ZLqcBDPL4zzIFSEIeog
vB6x6CB9QD+ElcvzFCl2aYxkUbsCWAZk2LmlNuFoW/uo1NI4Tu0tWy4Kq+HGn0ouzGhWa4xeC/hNDCwayNEVkaWhh8J3tRM8jbRUoPEH/Y5293lh7y31rl51I2dCCJOhEz1uRwd9iaMGIXZIpjjcpqcTOWCNB/xkKWc1vRawrx82rFJoLsNqnrmpowUCGpohMDS0UZJHHFnY1M0HPfUE/OABheDUevGVeyoYWHEG/dWL0/NcYWjBi5y4DI9vWeJOSSuyMDRT0AtbbXxHrynHzennkAmmOXwj6TwwtGMjSFMGQUcodCrsuoLkjWZ43zAanB/eLRvA2nXfnnpEVHmxqpkjZ0IIh6NMJ73b7fYYWjNixwjIfZp2N+7ELmZSkY2dsnKCLyEXsMsYmcNT4DeBLv0oYWlCQoSmCISNDC7zyMj/o7E/W53XavZmt0mvey76Qmcf4z9p/mM1e2YeUDS0Ygk5CdAi7YEct7+PvMrRgxM4r/erqth+7kElJPnYbLugaGhpY4Eu/ShhacJChKYIiI0PL86JU22GHNc+047Zq8VZ5ITOPYdzMgs1PKRtakASdgdmOfp2hBSN2ZC3e4BrrBNkV0aQkHzst6BoaGijQ7VNjsBmGFrRta6tAxy4OeYuyChD+wTQ0NHAgIejFNQz8FJthaMlF0HXs4pC3KKsA4R9MQ0MDB/p+/BbramjJRdAzRkFil7coq0CZfEFipIRN4KjxG6AFXUMjb+QtyiqQoFvILMqKWFeOm5ehzQVY29Y0NDQyR96irAIJuoX0OayIdeW4eR6qXIBVWEZDQyNz5C3KKhCZlLcq/raRcnvl3RarHJs+YDvCO+xECHesN4xhc+qVRW0eOodCDSRZUShvH/m1LI7aHds+jilmgVfg6P6jxDbHVE/vYV9MROun47m3o0SV49FdxS/WxB8HSGOX0xXsiJl+NY4XO34H0OeAxC7alLLQHLWga2jkjbxFWQUCkSNuHyithzBYrR7CaoJOKzvUafkft1ZARNFpNwEq6Bfz+e3EaXLnZEWB1ipkgr5l251KP2bnK5qgV0vkiaVBy1fQbgJRQW85ztyt+aDIsQyNlwdbkWMe6dUhiahdMI4WO+Ga7JFjErvv2kyjIDOOioK+rmYqHutqaNGx+/o4Q+QtyioQiGx32tte9cUt2sz307Yj9QKwBJ3+eIaSyqyaXzcavmuoN0wF/ZEcNaEvug9ZQT+GWoVM0Nu0pWE7pmQHVnOWhlk/ceseUKZRQR/TvgljKLasxpGoXX+rHD0WkFal0HHQXCY6jha7J9p2eZuq+KUNtTTb0ZYXheaouG0tF1HQhhZJ6NitciyNDd62BhLgCXqPNRPop1W4iQl6lXZQZfX2q5F6+8bE8VLuUHm4CX1UH1ULHA3sIOVOuaXYJIg8o/hFohtmfMqdJduduSpHom3luGMBafXyGCb08qDjaLGrsLrRtPUFq5xW7kSX6EXmqFhYJhdR0IYWSejYrXIsjQ0vLOMJOpO8st2JKAOO2JXozxMq5Ukd8QzHMVxBnzqT5thpXYz85uiy1yHt7LTNrey2vV5PHHA47piLoOkDfYKJEXSKJjBV4njJReoyJmoMuXTbRIsd3+DpmGZXyNBT+FtF5sjfjxKGFhRRGLZGjvDywMXFGHoZU59AIQ0t1S6UBmXFXqu14JgH953lX+vvAX8uZUMLjqDHmz0cHxe5GlrkY8fFq3pSMrkGOj44k5Ji7PIWZRVEyPgNj+7sSq/X73z3HlbtoYvmoVnP+vdoz3rXDkf/fCRX4/hqHsypktehVzj6y07FSBxfzO7986LeBVVnV1qSoF8pc3yy20fHnX67HDr+ihcGRk5z+HjF3A5J41ixq7COVbQhJblOy0+ffDtmD0XmyDdnkTC0YIjClE78UUFvzZlh4LGghpYqNO9osOYd/DEPbnx3NnvYt5bcyZQNLSiCnmD2IKOAQ2eSr6EFI3b3QrF9DoFJSTF2ayroxjaJUCWm4j6ioO/QjDsI+otp3kcE/ZG+lGSCblw0p9d8c0Dp97DwKoEX9JhFHhLHrvmxqJPrsE5fK3wYyYLuLv4UOLbtu06f3F/gfOCPv+KFASJqRMumcWIXjCPFrk3foW91XEF/IjNIVoKeEUe+OYuEoQVDFJrzcXMUJ+jO2xAUY1JQQ8sJvQ3f6a6aFxM6epxGbLjcd26sA2Np/eNOpmxowYhdktmDZjqZJSlXQ4v8yiiI1+mie1qKE/TApKQWu3UV9Dt70LtLeYXuNqsn8anFCfoYPHGeoEOu6G3a9NoHyYsCMOIEfTsuP430WsGsn9IWvS8g7pCLSBD0puP2MpTnWLH727Q98ZN4/BUvDJC5/zZB7LhxnNhddshKpzNou4Ley1DQM+LI3Y8yhhYMUQACcYJuXMAfCGYPHhkaWmr0idqogxaU6M1I7snQhMN9B1ppL609og4yHbskDC0YsUsyezBBHzrOMF9Di/xEGsTLXQFGf6HApKQWuzUV9B6LUWqmOIoGCxRNuZ9EU+4sNeQK+pszb147juO9/5G8DgeUUCDoRPY+o9/Cug7p/FCDVFGDXoPxgj6dw4O1ocSxwub8HqTA+GMRKaWjWwnpaG8cKXYGe5FAQ0hT7lsZptwz4Ri6H1c1tGAZq2IFneIKJKOQhhZ261WprcUVhMiEw33n3Npf7lkHu39AGChSNrRgxC7J7DGiM2gLNXYyhhb5iVSMV6ygByYlpdgZayrofLxE4Am6m3FPNMWxVz8TZz6Ci/R6+OiMmxPngp2UvA5tu0IwsDtsEiF6Hs1BIF+H7/CZ7btvmIuoohM9ZyqgwtGNFzUE8Mdf8cLADwxjaLFjYIu4X2aKQ+KYIOg/NbSkLujT+RwWdkU0tBgfZqlWqy+YaecdRp7Dgs5/58ayrL3/ZtaNdy5lQwtG7JLMHgx0E3Cuhhb5iVSMV5yg8yYlldgZWtClj92Mu/czvG1tGpgzyeNlC65XSBm5SyC565BZjSjgY8+O1XO0d+j0AvwgC/X7oDJe+O+69Vd1KhzbLFB3sDjnj7/ihQGW5TMStnSxcaTYMZAFOrBjWT4jw21rWXBMEvQfGlrSFfSLx2vnmj2uFNDQQib7OnmqhrqU5MaEd7Ini4igc98xdpfnZ9b+uX8qZUMLkqDHmj0oLiDjbuRqaJGfSMV4xQm6YFJSiJ2xpoLeto/L8QtYPEF3M+5uYZkPs86Gz2f7r9zf0PQ9Htdk9XPrZqdVV3kwhxwN7EHMVgsDz4ezuKfv0P3Lz0+5+xynE/7mkue4ZXeOaLy2xeOveGGA+XDenEP2cTqfDOPGsWJXJjPIAA6YD+fO7ke+UmSOySv0Hxla0hX0FnmyPnSloniGFjK/NGofbPW9szDNxqIReYfOf4dufvp3vnz1zqVsaEES9FizB4X7+JmnoUU2duF4xQm6aFKSj52xRoJ+VKlUyHxZqRzReHUqFZvNngLQBN3LuPulX90oPVjCloNA0K+cye3ctY9hCDq5CPuQgK+k9dACuy1K/O4YX9B9juQGPKR7StyFnTRH2EVScXeR8Mdf8EKBu1Om6X
(remainder of base64-encoded PNG chart image data omitted)"/> </defs> <style> tspan { white-space:pre } </style> <use id="With Process Affinity set " href="#img1" x="0" y="0" /> </svg>
3
0
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/pytorch_vs_tf_oob.svg
<svg width="4310" height="2426" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="656" y="-1" width="4310" height="2426"/></clipPath><clipPath id="clip1"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip2"><rect x="905" y="169" width="3729" height="1921"/></clipPath><clipPath id="clip3"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="1037" y1="2055.5" x2="1093" y2="2055.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill4"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip5"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="1286" y1="2052.5" x2="1341" y2="2052.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill6"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" 
stop-color="#FF8F00"/></linearGradient><clipPath id="clip7"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="1534" y1="2010.5" x2="1590" y2="2010.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill8"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip9"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="1782" y1="1969.5" x2="1838" y2="1969.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill10"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip11"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="2031" y1="1957.5" x2="2086" y2="1957.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill12"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" 
stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip13"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="2279" y1="2038.5" x2="2335" y2="2038.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill14"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip15"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="2528" y1="2013.5" x2="2583" y2="2013.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill16"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" 
stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip17"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="2776" y1="1969.5" x2="2832" y2="1969.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill18"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip19"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="3024" y1="1867.5" x2="3080" y2="1867.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill20"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" 
stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip21"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="3273" y1="1808" x2="3328" y2="1808" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill22"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip23"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="3521" y1="2010.5" x2="3577" y2="2010.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill24"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" 
stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip25"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="3770" y1="1998" x2="3825" y2="1998" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill26"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip27"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="4018" y1="1942" x2="4074" y2="1942" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill28"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip29"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="4266" y1="1523" x2="4322" y2="1523" gradientUnits="userSpaceOnUse" spreadMethod="reflect" 
id="fill30"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip31"><rect x="905" y="169" width="3729" height="1921"/></clipPath><linearGradient x1="4515" y1="1250" x2="4570" y2="1250" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill32"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip33"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip34"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip35"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip36"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip37"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip38"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip39"><rect x="656" y="0" width="4307" 
height="2423"/></clipPath><clipPath id="clip40"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip41"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip42"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip43"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip44"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip45"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip46"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip47"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip48"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip49"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip50"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip51"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip52"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip53"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip54"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip55"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip56"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip57"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip58"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip59"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip60"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip61"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip62"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip63"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip64"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip65"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip66"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip67"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip68"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip69"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip70"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip71"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip72"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip73"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip74"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip75"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip76"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip77"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip78"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip79"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip80"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip81"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip82"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip83"><rect x="656" y="0" 
width="4307" height="2423"/></clipPath><clipPath id="clip84"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip85"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip86"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip87"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip88"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip89"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip90"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip91"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip92"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip93"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip94"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip95"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip96"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip97"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip98"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip99"><rect x="656" y="0" width="4307" height="2423"/></clipPath><clipPath id="clip100"><rect x="656" y="0" width="4307" height="2423"/></clipPath><linearGradient x1="4703" y1="1310.5" x2="4726" y2="1310.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill101"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip102"><rect x="656" y="0" width="4307" height="2423"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-656 1)"><rect x="657" y="0" width="4307" height="2423" fill="#FFFFFF"/><g clip-path="url(#clip1)"><path d="M905.5 1813.56 4631.5 1813.56M905.5 1539.55 4631.5 1539.55M905.5 1265.54 4631.5 1265.54M905.5 991.533 4631.5 991.533M905.5 717.524 4631.5 717.524M905.5 443.515 4631.5 443.515M905.5 169.5 4631.5 169.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip2)"><path d="M966.032 2037.07 1022.03 2037.07 1022.03 
2087 966.032 2087ZM1215.04 2033.07 1270.04 2033.07 1270.04 2087 1215.04 2087ZM1463.05 2005.07 1519.05 2005.07 1519.05 2087 1463.05 2087ZM1712.06 1997.07 1767.06 1997.07 1767.06 2087 1712.06 2087ZM1960.06 1980.06 2016.07 1980.06 2016.07 2087 1960.06 2087ZM2208.07 2004.07 2264.07 2004.07 2264.07 2087 2208.07 2087ZM2457.08 2007.07 2512.08 2007.07 2512.08 2087 2457.08 2087ZM2705.09 1994.07 2761.09 1994.07 2761.09 2087 2705.09 2087ZM2954.1 1968.06 3009.1 1968.06 3009.1 2087 2954.1 2087ZM3202.1 1861.06 3258.11 1861.06 3258.11 2087 3202.1 2087ZM3450.11 1999.07 3506.12 1999.07 3506.12 2087 3450.11 2087ZM3699.12 2000.07 3754.12 2000.07 3754.12 2087 3699.12 2087ZM3947.13 1996.07 4003.13 1996.07 4003.13 2087 3947.13 2087ZM4195.14 1778.06 4251.14 1778.06 4251.14 2087 4195.14 2087ZM4444.15 1610.05 4500.15 1610.05 4500.15 2087 4444.15 2087Z" fill="#C00000"/></g><g clip-path="url(#clip3)"><rect x="1037" y="2024" width="56" height="63" fill="url(#fill4)"/></g><g clip-path="url(#clip5)"><rect x="1286" y="2018" width="55" height="68.9996" fill="url(#fill6)"/></g><g clip-path="url(#clip7)"><rect x="1534" y="1934" width="55.9996" height="153" fill="url(#fill8)"/></g><g clip-path="url(#clip9)"><rect x="1782" y="1852" width="56" height="235" fill="url(#fill10)"/></g><g clip-path="url(#clip11)"><rect x="2031" y="1828" width="55.0001" height="259" fill="url(#fill12)"/></g><g clip-path="url(#clip13)"><rect x="2279" y="1990" width="56" height="97" fill="url(#fill14)"/></g><g clip-path="url(#clip15)"><rect x="2528" y="1940" width="55" height="147" fill="url(#fill16)"/></g><g clip-path="url(#clip17)"><rect x="2776" y="1852" width="56" height="235" fill="url(#fill18)"/></g><g clip-path="url(#clip19)"><rect x="3024" y="1648" width="56" height="439" fill="url(#fill20)"/></g><g clip-path="url(#clip21)"><rect x="3273" y="1529" width="55" height="558" fill="url(#fill22)"/></g><g clip-path="url(#clip23)"><rect x="3521" y="1934" width="56" height="153" fill="url(#fill24)"/></g><g clip-path="url(#clip25)"><rect x="3770" y="1909" width="55" height="178" fill="url(#fill26)"/></g><g clip-path="url(#clip27)"><rect x="4018" y="1797" width="56" height="290" fill="url(#fill28)"/></g><g clip-path="url(#clip29)"><rect x="4266" y="959" width="56" height="1128" fill="url(#fill30)"/></g><g clip-path="url(#clip31)"><rect x="4515" y="413" width="55" height="1674" fill="url(#fill32)"/></g><g clip-path="url(#clip33)"><path d="M905.5 2087.5 4631.5 2087.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip34)"><path d="M905.5 2087.5 905.5 2167.5M2147.57 2087.5 2147.57 2167.5M3389.61 2087.5 3389.61 2167.5M4631.5 2087.5 4631.5 2167.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip35)"><path d="M905.5 2167.5 905.5 2247.5M2147.57 2167.5 2147.57 2247.5M3389.61 2167.5 3389.61 2247.5M4631.5 2167.5 4631.5 2247.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip36)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 973.345 2005)">93</text></g><g clip-path="url(#clip37)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1221.75 2001)">99</text></g><g clip-path="url(#clip38)"><text fill="#404040" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1459.7 1973)">151</text></g><g clip-path="url(#clip39)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1708.1 1966)">164</text></g><g clip-path="url(#clip40)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1956.5 1948)">196</text></g><g clip-path="url(#clip41)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2204.9 1972)">153</text></g><g clip-path="url(#clip42)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2453.3 1975)">147</text></g><g clip-path="url(#clip43)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2701.7 1962)">171</text></g><g clip-path="url(#clip44)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2950.1 1936)">218</text></g><g clip-path="url(#clip45)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3198.5 1829)">413</text></g><g clip-path="url(#clip46)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3446.91 1967)">161</text></g><g clip-path="url(#clip47)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3695.31 1969)">159</text></g><g clip-path="url(#clip48)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3943.71 1965)">166</text></g><g clip-path="url(#clip49)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4192.11 1747)">564</text></g><g clip-path="url(#clip50)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4440.51 1578)">871</text></g><g clip-path="url(#clip51)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1033.63 1992)">115</text></g><g clip-path="url(#clip52)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1282.03 1986)">127</text></g><g clip-path="url(#clip53)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1530.43 1902)">280</text></g><g clip-path="url(#clip54)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1778.83 1820)">429</text></g><g clip-path="url(#clip55)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2027.23 1796)">474</text></g><g clip-path="url(#clip56)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" 
transform="matrix(1 0 0 1 2275.63 1958)">178</text></g><g clip-path="url(#clip57)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2524.03 1908)">269</text></g><g clip-path="url(#clip58)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2772.44 1820)">429</text></g><g clip-path="url(#clip59)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3020.84 1616)">802</text></g><g clip-path="url(#clip60)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3258.79 1497)">1019</text></g><g clip-path="url(#clip61)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3517.64 1902)">281</text></g><g clip-path="url(#clip62)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3766.04 1877)">325</text></g><g clip-path="url(#clip63)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4014.44 1765)">531</text></g><g clip-path="url(#clip64)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4252.39 927)">2060</text></g><g clip-path="url(#clip65)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4500.79 381)">3056</text></g><g clip-path="url(#clip66)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 846.244 2099)">0</text></g><g clip-path="url(#clip67)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 804.444 1825)">500</text></g><g clip-path="url(#clip68)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 783.544 1551)">1000</text></g><g clip-path="url(#clip69)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 783.544 1277)">1500</text></g><g clip-path="url(#clip70)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 783.544 1003)">2000</text></g><g clip-path="url(#clip71)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 783.544 729)">2500</text></g><g clip-path="url(#clip72)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 783.544 455)">3000</text></g><g clip-path="url(#clip73)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 783.544 182)">3500</text></g><g clip-path="url(#clip74)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1008.71 2153)">20</text></g><g clip-path="url(#clip75)"><text 
fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1257.11 2153)">32</text></g><g clip-path="url(#clip76)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1495.06 2153)">128</text></g><g clip-path="url(#clip77)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1743.46 2153)">384</text></g><g clip-path="url(#clip78)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1991.87 2153)">512</text></g><g clip-path="url(#clip79)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2250.72 2153)">20</text></g><g clip-path="url(#clip80)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2499.12 2153)">32</text></g><g clip-path="url(#clip81)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2737.07 2153)">128</text></g><g clip-path="url(#clip82)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2985.47 2153)">384</text></g><g clip-path="url(#clip83)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3233.87 2153)">512</text></g><g clip-path="url(#clip84)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3492.72 2153)">20</text></g><g clip-path="url(#clip85)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3741.12 2153)">32</text></g><g clip-path="url(#clip86)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3979.07 2153)">128</text></g><g clip-path="url(#clip87)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4227.48 2153)">384</text></g><g clip-path="url(#clip88)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4475.88 2153)">512</text></g><g clip-path="url(#clip89)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1515.96 2233)">1</text></g><g clip-path="url(#clip90)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2757.97 2233)">4</text></g><g clip-path="url(#clip91)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3999.97 2233)">8</text></g><g clip-path="url(#clip92)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(6.12323e-17 -1 1 6.12323e-17 757.854 1247)">Latency (ms)</text></g><g clip-path="url(#clip93)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" 
font-weight="400" font-size="46" transform="matrix(1 0 0 1 2609.1 2298)">Sequence Length</text></g><g clip-path="url(#clip94)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2674.14 2354)">Batch Size</text></g><g clip-path="url(#clip95)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 1478.14 95)">PyTorch &amp; TensorFlow Eager </text></g><g clip-path="url(#clip96)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 2225.31 95)">-</text></g><g clip-path="url(#clip97)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 2259.46 95)">Latency measurement for small batch size and default threading settings </text></g><g clip-path="url(#clip98)"><rect x="4703" y="1222" width="23" height="22" fill="#C00000"/></g><g clip-path="url(#clip99)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4735.75 1244)">pytorch</text></g><g clip-path="url(#clip100)"><rect x="4703" y="1299" width="23" height="23" fill="url(#fill101)"/></g><g clip-path="url(#clip102)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4735.75 1322)">tensorflow</text></g><rect x="657.5" y="0.499836" width="4307" height="2423" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg>
4
0
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/batch_size_scaling_latency_optimal_nb_instances.svg
<svg width="3936" height="1829" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="827" y="772" width="3936" height="1829"/></clipPath><clipPath id="clip1"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip2"><rect x="1014" y="943" width="3699" height="1350"/></clipPath><clipPath id="clip3"><rect x="1014" y="943" width="3699" height="1350"/></clipPath><clipPath id="clip4"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip5"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip6"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip7"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip8"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip9"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip10"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip11"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip12"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip13"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip14"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip15"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip16"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip17"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip18"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip19"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip20"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip21"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip22"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip23"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip24"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip25"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip26"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip27"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip28"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip29"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip30"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip31"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip32"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip33"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip34"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip35"><rect x="828" y="773" width="3932" height="1825"/></clipPath><clipPath id="clip36"><rect x="828" y="773" width="3932" height="1825"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-827 -772)"><rect x="828" y="773" width="3933" height="1826" fill="#FFFFFF"/><g clip-path="url(#clip1)"><path d="M1014.5 2140.57 4710.5 2140.57M1014.5 1991.57 4710.5 1991.57M1014.5 1841.56 4710.5 1841.56M1014.5 1691.56 4710.5 1691.56M1014.5 1542.55 4710.5 1542.55M1014.5 1392.55 4710.5 1392.55M1014.5 
1242.54 4710.5 1242.54M1014.5 1093.54 4710.5 1093.54M1014.5 943.5 4710.5 943.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip2)"><path d="M1196.04 1991.07 1362.04 1991.07 1362.04 2290 1196.04 2290ZM1935.06 1991.07 2101.07 1991.07 2101.07 2290 1935.06 2290ZM2674.09 1692.06 2840.09 1692.06 2840.09 2290 2674.09 2290ZM3414.11 1692.06 3579.12 1692.06 3579.12 2290 3414.11 2290ZM4153.14 1692.06 4318.14 1692.06 4318.14 2290 4153.14 2290Z" fill="#4F81BD"/></g><g clip-path="url(#clip3)"><path d="M1407.05 1692.06 1572.05 1692.06 1572.05 2290 1407.05 2290ZM2146.07 1093.04 2311.08 1093.04 2311.08 2290 2146.07 2290ZM2885.09 1692.06 3051.1 1692.06 3051.1 2290 2885.09 2290ZM3624.12 1093.04 3790.12 1093.04 3790.12 2290 3624.12 2290ZM4363.14 1093.04 4529.15 1093.04 4529.15 2290 4363.14 2290Z" fill="#C0504D"/></g><g clip-path="url(#clip4)"><path d="M1014.5 2290.5 4710.5 2290.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip5)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1268.49 1959)">2</text></g><g clip-path="url(#clip6)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2007.67 1959)">2</text></g><g clip-path="url(#clip7)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2746.86 1660)">4</text></g><g clip-path="url(#clip8)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3486.05 1660)">4</text></g><g clip-path="url(#clip9)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4225.23 1660)">4</text></g><g clip-path="url(#clip10)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1478.97 1660)">4</text></g><g clip-path="url(#clip11)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2218.16 1061)">8</text></g><g clip-path="url(#clip12)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2957.35 1660)">4</text></g><g clip-path="url(#clip13)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3696.53 1061)">8</text></g><g clip-path="url(#clip14)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4435.72 1061)">8</text></g><g clip-path="url(#clip15)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 2302)">0</text></g><g clip-path="url(#clip16)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 2152)">1</text></g><g clip-path="url(#clip17)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 2003)">2</text></g><g 
clip-path="url(#clip18)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 1853)">3</text></g><g clip-path="url(#clip19)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 1703)">4</text></g><g clip-path="url(#clip20)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 1554)">5</text></g><g clip-path="url(#clip21)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 1404)">6</text></g><g clip-path="url(#clip22)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 1254)">7</text></g><g clip-path="url(#clip23)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 1105)">8</text></g><g clip-path="url(#clip24)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 955.419 955)">9</text></g><g clip-path="url(#clip25)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1363.28 2356)">20</text></g><g clip-path="url(#clip26)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2102.47 2356)">32</text></g><g clip-path="url(#clip27)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2831.2 2356)">128</text></g><g clip-path="url(#clip28)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3570.39 2356)">384</text></g><g clip-path="url(#clip29)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4309.58 2356)">512</text></g><g clip-path="url(#clip30)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(6.12323e-17 -1 1 6.12323e-17 929.729 1874)">Number of model instances</text></g><g clip-path="url(#clip31)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2703.24 2425)">Sequence Length</text></g><g clip-path="url(#clip32)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 1563.29 869)">Optimal number of instances minimizing latency w.r.t the sequence length (total batch size = 8)</text></g><g clip-path="url(#clip33)"><rect x="2595" y="2521" width="23" height="23" fill="#4F81BD"/></g><g clip-path="url(#clip34)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2627.5 2544)">pytorch</text></g><g clip-path="url(#clip35)"><rect x="2797" y="2521" width="22" height="23" fill="#C0504D"/></g><g clip-path="url(#clip36)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 
2829.03 2544)">tensorflow</text></g><rect x="828.5" y="773.5" width="3933" height="1826" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg>
5
0
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/pytorch_tf_intel_ht_impact.svg
<svg width="3981" height="2244" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="1212" y="171" width="3981" height="2244"/></clipPath><clipPath id="clip1"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip2"><rect x="1441" y="341" width="3581" height="1795"/></clipPath><clipPath id="clip3"><rect x="1441" y="341" width="3581" height="1795"/></clipPath><clipPath id="clip4"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip5"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip6"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip7"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip8"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip9"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip10"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip11"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip12"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip13"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip14"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip15"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip16"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip17"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip18"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip19"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip20"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip21"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip22"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip23"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip24"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip25"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip26"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip27"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip28"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip29"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip30"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip31"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip32"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip33"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip34"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip35"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip36"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip37"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip38"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip39"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip40"><rect x="1213" y="171" width="3977" 
height="2242"/></clipPath><clipPath id="clip41"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip42"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip43"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip44"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip45"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip46"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip47"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip48"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip49"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip50"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip51"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip52"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip53"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip54"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip55"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip56"><rect x="1213" y="171" width="3977" height="2242"/></clipPath><clipPath id="clip57"><rect x="1213" y="171" width="3977" height="2242"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-1212 -171)"><rect x="1213" y="172" width="3978" height="2241" fill="#FFFFFF"/><g clip-path="url(#clip1)"><path d="M1441.5 1909.56 5018.5 1909.56M1441.5 1685.56 5018.5 1685.56M1441.5 1461.55 5018.5 1461.55M1441.5 1237.54 5018.5 1237.54M1441.5 1013.53 5018.5 1013.53M1441.5 789.526 5018.5 789.526M1441.5 565.519 5018.5 565.519M1441.5 341.5 5018.5 341.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip2)"><path d="M1529.05 2063.07 1609.05 2063.07 1609.05 2133 1529.05 2133ZM1887.06 1788.06 1967.06 1788.06 1967.06 2133 1887.06 2133ZM2245.07 2057.07 2325.08 2057.07 2325.08 2133 2245.07 2133ZM2602.09 1705.06 2683.09 1705.06 2683.09 2133 2602.09 2133ZM2960.1 2021.07 3040.1 2021.07 3040.1 2133 2960.1 2133ZM3318.11 1395.05 3398.11 1395.05 3398.11 2133 3318.11 2133ZM3676.12 1884.06 3756.12 1884.06 3756.12 2133 3676.12 2133ZM4033.13 662.022 4114.13 662.022 4114.13 2133 4033.13 2133ZM4391.14 1762.06 4471.15 1762.06 4471.15 2133 4391.14 2133ZM4749.16 444.015 4829.16 444.015 4829.16 2133 4749.16 2133Z" fill="#4F81BD"/></g><g clip-path="url(#clip3)"><path d="M1631.05 2055.07 1711.06 2055.07 1711.06 2133 1631.05 2133ZM1989.07 1781.06 2069.07 1781.06 2069.07 2133 1989.07 2133ZM2347.08 2045.07 2427.08 2045.07 2427.08 2133 2347.08 2133ZM2704.09 1756.06 2785.09 1756.06 2785.09 2133 2704.09 2133ZM3062.1 1994.07 3142.1 1994.07 3142.1 2133 3062.1 2133ZM3420.11 1466.05 3500.11 1466.05 3500.11 2133 3420.11 2133ZM3778.12 1856.06 3858.13 1856.06 3858.13 2133 3778.12 2133ZM4135.14 1206.04 4215.14 1206.04 4215.14 2133 4135.14 2133ZM4493.15 1533.05 4573.15 1533.05 4573.15 2133 4493.15 2133ZM4851.16 1077.04 4931.16 1077.04 4931.16 2133 4851.16 2133Z" fill="#C0504D"/></g><g clip-path="url(#clip4)"><path d="M1441.5 2133.5 5018.5 2133.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip5)"><path d="M1441.5 2133.5 1441.5 2213.5M2156.57 2133.5 2156.57 2213.5M2872.59 2133.5 
2872.59 2213.5M3587.62 2133.5 3587.62 2213.5M4303.64 2133.5 4303.64 2213.5M5018.5 2133.5 5018.5 2213.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip6)"><path d="M1441.5 2213.5 1441.5 2293.5M2156.57 2213.5 2156.57 2293.5M2872.59 2213.5 2872.59 2293.5M3587.62 2213.5 3587.62 2293.5M4303.64 2213.5 4303.64 2293.5M5018.5 2213.5 5018.5 2293.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip7)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1548.43 2031)">16</text></g><g clip-path="url(#clip8)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1906.19 1757)">77</text></g><g clip-path="url(#clip9)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2263.95 2025)">17</text></g><g clip-path="url(#clip10)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2621.71 1673)">96</text></g><g clip-path="url(#clip11)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2979.47 1989)">25</text></g><g clip-path="url(#clip12)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3326.78 1363)">165</text></g><g clip-path="url(#clip13)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3694.99 1852)">56</text></g><g clip-path="url(#clip14)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4042.31 630)">329</text></g><g clip-path="url(#clip15)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4410.52 1730)">83</text></g><g clip-path="url(#clip16)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4757.83 412)">377</text></g><g clip-path="url(#clip17)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1650.3 2023)">17</text></g><g clip-path="url(#clip18)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2008.06 1749)">79</text></g><g clip-path="url(#clip19)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2365.82 2014)">20</text></g><g clip-path="url(#clip20)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2723.59 1724)">84</text></g><g clip-path="url(#clip21)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3081.35 1962)">31</text></g><g clip-path="url(#clip22)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 
3428.66 1434)">149</text></g><g clip-path="url(#clip23)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3796.87 1824)">62</text></g><g clip-path="url(#clip24)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4144.18 1174)">207</text></g><g clip-path="url(#clip25)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4501.94 1501)">134</text></g><g clip-path="url(#clip26)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4859.7 1045)">236</text></g><g clip-path="url(#clip27)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1382.22 2145)">0</text></g><g clip-path="url(#clip28)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1361.32 1921)">50</text></g><g clip-path="url(#clip29)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1340.42 1697)">100</text></g><g clip-path="url(#clip30)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1340.42 1473)">150</text></g><g clip-path="url(#clip31)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1340.42 1249)">200</text></g><g clip-path="url(#clip32)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1340.42 1025)">250</text></g><g clip-path="url(#clip33)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1340.42 801)">300</text></g><g clip-path="url(#clip34)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1340.42 577)">350</text></g><g clip-path="url(#clip35)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1340.42 353)">400</text></g><g clip-path="url(#clip36)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1555.55 2198)">pytorch</text></g><g clip-path="url(#clip37)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1887.23 2198)">tensorflow</text></g><g clip-path="url(#clip38)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2271.07 2198)">pytorch</text></g><g clip-path="url(#clip39)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2602.75 2198)">tensorflow</text></g><g clip-path="url(#clip40)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2986.59 2198)">pytorch</text></g><g clip-path="url(#clip41)"><text fill="#595959" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3318.27 2198)">tensorflow</text></g><g clip-path="url(#clip42)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3702.11 2198)">pytorch</text></g><g clip-path="url(#clip43)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4033.8 2198)">tensorflow</text></g><g clip-path="url(#clip44)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4417.64 2198)">pytorch</text></g><g clip-path="url(#clip45)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4749.32 2198)">tensorflow</text></g><g clip-path="url(#clip46)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1778.25 2278)">20</text></g><g clip-path="url(#clip47)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2493.77 2278)">32</text></g><g clip-path="url(#clip48)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3198.84 2278)">128</text></g><g clip-path="url(#clip49)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3914.36 2278)">384</text></g><g clip-path="url(#clip50)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4629.88 2278)">512</text></g><g clip-path="url(#clip51)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(6.12323e-17 -1 1 6.12323e-17 1314.73 1356)">Latency (ms)</text></g><g clip-path="url(#clip52)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 3075.25 2344)">Sequence length</text></g><g clip-path="url(#clip53)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 2378.54 267)">Latency impact of using 1 or 2 hardware thread(s) per CPU core</text></g><g clip-path="url(#clip54)"><rect x="5091" y="1302" width="23" height="23" fill="#4F81BD"/></g><g clip-path="url(#clip55)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 5123.32 1325)">1</text></g><g clip-path="url(#clip56)"><rect x="5091" y="1380" width="23" height="22" fill="#C0504D"/></g><g clip-path="url(#clip57)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 5123.32 1403)">2</text></g><rect x="1213.5" y="172.5" width="3978" height="2241" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg>
6
0
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1
hf_public_repos/blog/assets/21_bert_cpu_scaling_part_1/imgs/batch_scaling_exp_throughput.svg
<svg width="3984" height="2331" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" overflow="hidden"><defs><clipPath id="clip0"><rect x="855" y="143" width="3984" height="2331"/></clipPath><clipPath id="clip1"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip2"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><clipPath id="clip3"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="1175" y1="1855" x2="1213" y2="1855" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill4"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip5"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="1346" y1="1830" x2="1384" y2="1830" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill6"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" 
stop-color="#FF8F00"/></linearGradient><clipPath id="clip7"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="1517" y1="1790" x2="1555" y2="1790" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill8"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip9"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="1688" y1="1718" x2="1726" y2="1718" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill10"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip11"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="1859" y1="1855.5" x2="1897" y2="1855.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill12"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" 
stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip13"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="2030" y1="1839" x2="2068" y2="1839" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill14"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip15"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="2201" y1="1804" x2="2239" y2="1804" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill16"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" 
stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip17"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="2372" y1="1720.5" x2="2410" y2="1720.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill18"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip19"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="2543" y1="1859" x2="2581" y2="1859" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill20"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" 
stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip21"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="2714" y1="1849.5" x2="2752" y2="1849.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill22"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip23"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="2885" y1="1833" x2="2923" y2="1833" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill24"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" 
stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip25"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="3056" y1="1806" x2="3094" y2="1806" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill26"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip27"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="3227" y1="1862.5" x2="3265" y2="1862.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill28"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip29"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="3398" y1="1857" x2="3436" y2="1857" gradientUnits="userSpaceOnUse" spreadMethod="reflect" 
id="fill30"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip31"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="3569" y1="1848" x2="3607" y2="1848" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill32"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip33"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="3740" y1="1829" x2="3778" y2="1829" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill34"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" 
stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip35"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="3911" y1="1863" x2="3949" y2="1863" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill36"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip37"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="4082" y1="1858.5" x2="4120" y2="1858.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill38"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" 
stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip39"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="4253" y1="1849" x2="4291" y2="1849" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill40"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip41"><rect x="1083" y="314" width="3424" height="1553"/></clipPath><linearGradient x1="4424" y1="1831" x2="4462" y2="1831" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill42"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" 
stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip43"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip44"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip45"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip46"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip47"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip48"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip49"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip50"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip51"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip52"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip53"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip54"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip55"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip56"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip57"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip58"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip59"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip60"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip61"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip62"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip63"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip64"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip65"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip66"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip67"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip68"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip69"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip70"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip71"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip72"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip73"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip74"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip75"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip76"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip77"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip78"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip79"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip80"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip81"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath 
id="clip82"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip83"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip84"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip85"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip86"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip87"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip88"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip89"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip90"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip91"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip92"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip93"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip94"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip95"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip96"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip97"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip98"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip99"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip100"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip101"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip102"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip103"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip104"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip105"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip106"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip107"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip108"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip109"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip110"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip111"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip112"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip113"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip114"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip115"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip116"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip117"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip118"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip119"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip120"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip121"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip122"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip123"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip124"><rect x="855" y="144" 
width="3981" height="2327"/></clipPath><clipPath id="clip125"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip126"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip127"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip128"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip129"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip130"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip131"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip132"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip133"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip134"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip135"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip136"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip137"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip138"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip139"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip140"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip141"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip142"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip143"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip144"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip145"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip146"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip147"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip148"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip149"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip150"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip151"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip152"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip153"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip154"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip155"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip156"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip157"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip158"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip159"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip160"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip161"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip162"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip163"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip164"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip165"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip166"><rect x="855" y="144" width="3981" 
height="2327"/></clipPath><clipPath id="clip167"><rect x="855" y="144" width="3981" height="2327"/></clipPath><clipPath id="clip168"><rect x="855" y="144" width="3981" height="2327"/></clipPath><linearGradient x1="4576" y1="1406.5" x2="4599" y2="1406.5" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill169"><stop offset="0" stop-color="#FF7101"/><stop offset="0.0344828" stop-color="#FF7200"/><stop offset="0.0689655" stop-color="#FF7400"/><stop offset="0.103448" stop-color="#FF7600"/><stop offset="0.137931" stop-color="#FF7800"/><stop offset="0.172414" stop-color="#FF7900"/><stop offset="0.206897" stop-color="#FF7B00"/><stop offset="0.241379" stop-color="#FF7D00"/><stop offset="0.275862" stop-color="#FF7E00"/><stop offset="0.310345" stop-color="#FF8000"/><stop offset="0.344828" stop-color="#FF8100"/><stop offset="0.37931" stop-color="#FF8200"/><stop offset="0.413793" stop-color="#FF8300"/><stop offset="0.448276" stop-color="#FF8500"/><stop offset="0.482759" stop-color="#FF8600"/><stop offset="0.517241" stop-color="#FF8700"/><stop offset="0.551724" stop-color="#FF8800"/><stop offset="0.586207" stop-color="#FF8900"/><stop offset="0.62069" stop-color="#FF8A00"/><stop offset="0.655172" stop-color="#FF8A00"/><stop offset="0.689655" stop-color="#FF8B00"/><stop offset="0.724138" stop-color="#FF8C00"/><stop offset="0.758621" stop-color="#FF8C00"/><stop offset="0.793103" stop-color="#FF8D00"/><stop offset="0.827586" stop-color="#FF8D00"/><stop offset="0.862069" stop-color="#FF8E00"/><stop offset="0.896552" stop-color="#FF8E00"/><stop offset="0.931035" stop-color="#FF8E00"/><stop offset="0.965517" stop-color="#FF8E00"/><stop offset="1" stop-color="#FF8F00"/></linearGradient><clipPath id="clip170"><rect x="855" y="144" width="3981" height="2327"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-855 -143)"><rect x="856" y="144" width="3981" height="2328" fill="#FFFFFF"/><g clip-path="url(#clip1)"><path d="M1083.5 1605.55 4504.5 1605.55M1083.5 1347.54 4504.5 1347.54M1083.5 1089.54 4504.5 1089.54M1083.5 830.527 4504.5 830.527M1083.5 572.519 4504.5 572.519M1083.5 314.5 4504.5 314.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip2)"><path d="M1126.04 1723.06 1164.04 1723.06 1164.04 1864 1126.04 1864ZM1297.04 1384.05 1335.04 1384.05 1335.04 1864 1297.04 1864ZM1468.05 979.032 1506.05 979.032 1506.05 1864 1468.05 1864ZM1639.05 393.013 1677.06 393.013 1677.06 1864 1639.05 1864ZM1810.06 1712.06 1848.06 1712.06 1848.06 1864 1810.06 1864ZM1981.06 1469.05 2019.07 1469.05 2019.07 1864 1981.06 1864ZM2152.07 1084.04 2190.07 1084.04 2190.07 1864 2152.07 1864ZM2323.08 525.017 2361.08 525.017 2361.08 1864 2323.08 1864ZM2494.08 1802.06 2532.08 1802.06 2532.08 1864 2494.08 1864ZM2665.09 1690.06 2703.09 1690.06 2703.09 1864 2665.09 1864ZM2836.09 1450.05 2874.09 1450.05 2874.09 1864 2836.09 1864ZM3007.1 1070.04 3045.1 1070.04 3045.1 1864 3007.1 1864ZM3178.1 1853.06 3216.11 1853.06 3216.11 1864 3178.1 1864ZM3349.11 1802.06 3387.11 1802.06 3387.11 1864 3349.11 1864ZM3520.12 1723.06 3558.12 1723.06 3558.12 1864 3520.12 1864ZM3691.12 1582.05 3729.12 1582.05 3729.12 1864 3691.12 1864ZM3862.13 1857.06 3900.13 1857.06 3900.13 1864 3862.13 1864ZM4033.13 1832.06 4071.13 1832.06 4071.13 1864 4033.13 1864ZM4204.14 1768.06 4242.14 1768.06 4242.14 1864 4204.14 1864ZM4375.14 1681.06 4413.15 1681.06 4413.15 1864 4375.14 1864Z" fill="#C00000"/></g><g clip-path="url(#clip3)"><rect x="1175" y="1846" width="38.0001" height="18.0002" 
fill="url(#fill4)"/></g><g clip-path="url(#clip5)"><rect x="1346" y="1796" width="37.9998" height="68.0001" fill="url(#fill6)"/></g><g clip-path="url(#clip7)"><rect x="1517" y="1716" width="38.0001" height="148" fill="url(#fill8)"/></g><g clip-path="url(#clip9)"><rect x="1688" y="1572" width="37.9998" height="292" fill="url(#fill10)"/></g><g clip-path="url(#clip11)"><rect x="1859" y="1847" width="38.0001" height="17.0002" fill="url(#fill12)"/></g><g clip-path="url(#clip13)"><rect x="2030" y="1814" width="38.0001" height="50.0002" fill="url(#fill14)"/></g><g clip-path="url(#clip15)"><rect x="2201" y="1744" width="37.9998" height="120" fill="url(#fill16)"/></g><g clip-path="url(#clip17)"><rect x="2372" y="1577" width="38.0002" height="287" fill="url(#fill18)"/></g><g clip-path="url(#clip19)"><rect x="2543" y="1854" width="37.9998" height="10.0004" fill="url(#fill20)"/></g><g clip-path="url(#clip21)"><rect x="2714" y="1835" width="38.0002" height="29.0002" fill="url(#fill22)"/></g><g clip-path="url(#clip23)"><rect x="2885" y="1802" width="37.9998" height="62.0002" fill="url(#fill24)"/></g><g clip-path="url(#clip25)"><rect x="3056" y="1748" width="38.0002" height="116" fill="url(#fill26)"/></g><g clip-path="url(#clip27)"><rect x="3227" y="1861" width="37.9998" height="3.00012" fill="url(#fill28)"/></g><g clip-path="url(#clip29)"><rect x="3398" y="1850" width="38.0002" height="14.0001" fill="url(#fill30)"/></g><g clip-path="url(#clip31)"><rect x="3569" y="1832" width="37.9998" height="32.0004" fill="url(#fill32)"/></g><g clip-path="url(#clip33)"><rect x="3740" y="1794" width="38.0002" height="70.0001" fill="url(#fill34)"/></g><g clip-path="url(#clip35)"><rect x="3911" y="1862" width="38.0002" height="2.00012" fill="url(#fill36)"/></g><g clip-path="url(#clip37)"><rect x="4082" y="1853" width="38" height="11" fill="url(#fill38)"/></g><g clip-path="url(#clip39)"><rect x="4253" y="1834" width="38" height="30.0002" fill="url(#fill40)"/></g><g clip-path="url(#clip41)"><rect x="4424" y="1798" width="38" height="66" fill="url(#fill42)"/></g><g clip-path="url(#clip43)"><path d="M1083.5 1863.5 4504.5 1863.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip44)"><path d="M1083.5 1863.5 1083.5 1943.5M1254.54 1863.5 1254.54 1943.5M1425.55 1863.5 1425.55 1943.5M1596.55 1863.5 1596.55 1943.5M1767.56 1863.5 1767.56 1943.5M1938.56 1863.5 1938.56 1943.5M2109.57 1863.5 2109.57 1943.5M2280.57 1863.5 2280.57 1943.5M2451.58 1863.5 2451.58 1943.5M2622.59 1863.5 2622.59 1943.5M2793.59 1863.5 2793.59 1943.5M2964.6 1863.5 2964.6 1943.5M3135.6 1863.5 3135.6 1943.5M3307.61 1863.5 3307.61 1943.5M3478.61 1863.5 3478.61 1943.5M3649.62 1863.5 3649.62 1943.5M3820.63 1863.5 3820.63 1943.5M3991.63 1863.5 3991.63 1943.5M4162.64 1863.5 4162.64 1943.5M4333.64 1863.5 4333.64 1943.5M4504.5 1863.5 4504.5 1943.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip45)"><path d="M1083.5 1943.5 1083.5 2023.5M1254.54 1943.5 1254.54 2023.5M1425.55 1943.5 1425.55 2023.5M1596.55 1943.5 1596.55 2023.5M1767.56 1943.5 1767.56 2023.5M1938.56 1943.5 1938.56 2023.5M2109.57 1943.5 2109.57 2023.5M2280.57 1943.5 2280.57 2023.5M2451.58 1943.5 2451.58 2023.5M2622.59 1943.5 2622.59 2023.5M2793.59 1943.5 2793.59 2023.5M2964.6 1943.5 2964.6 2023.5M3135.6 1943.5 3135.6 2023.5M3307.61 1943.5 3307.61 2023.5M3478.61 1943.5 3478.61 2023.5M3649.62 1943.5 3649.62 2023.5M3820.63 1943.5 3820.63 
2023.5M3991.63 1943.5 3991.63 2023.5M4162.64 1943.5 4162.64 2023.5M4333.64 1943.5 4333.64 2023.5M4504.5 1943.5 4504.5 2023.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip46)"><path d="M1083.5 2023.5 1083.5 2103.5M1254.54 2023.5 1254.54 2103.5M1425.55 2023.5 1425.55 2103.5M1596.55 2023.5 1596.55 2103.5M1767.56 2023.5 1767.56 2103.5M1938.56 2023.5 1938.56 2103.5M2109.57 2023.5 2109.57 2103.5M2280.57 2023.5 2280.57 2103.5M2451.58 2023.5 2451.58 2103.5M2622.59 2023.5 2622.59 2103.5M2793.59 2023.5 2793.59 2103.5M2964.6 2023.5 2964.6 2103.5M3135.6 2023.5 3135.6 2103.5M3307.61 2023.5 3307.61 2103.5M3478.61 2023.5 3478.61 2103.5M3649.62 2023.5 3649.62 2103.5M3820.63 2023.5 3820.63 2103.5M3991.63 2023.5 3991.63 2103.5M4162.64 2023.5 4162.64 2103.5M4333.64 2023.5 4333.64 2103.5M4504.5 2023.5 4504.5 2103.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip47)"><path d="M1083.5 2103.5 1083.5 2183.5M1767.56 2103.5 1767.56 2183.5M2451.58 2103.5 2451.58 2183.5M3135.6 2103.5 3135.6 2183.5M3820.63 2103.5 3820.63 2183.5M4504.5 2103.5 4504.5 2183.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip48)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1124.15 1691)">27</text></g><g clip-path="url(#clip49)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1295.16 1353)">93</text></g><g clip-path="url(#clip50)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1455.73 947)">171</text></g><g clip-path="url(#clip51)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1626.75 361)">285</text></g><g clip-path="url(#clip52)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1808.22 1680)">29</text></g><g clip-path="url(#clip53)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1979.23 1437)">76</text></g><g clip-path="url(#clip54)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2139.8 1052)">151</text></g><g clip-path="url(#clip55)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2310.82 493)">259</text></g><g clip-path="url(#clip56)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2492.29 1770)">12</text></g><g clip-path="url(#clip57)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2663.3 1658)">34</text></g><g clip-path="url(#clip58)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2834.32 1418)">80</text></g><g clip-path="url(#clip59)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" 
transform="matrix(1 0 0 1 2994.89 1039)">154</text></g><g clip-path="url(#clip60)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3186.81 1821)">2</text></g><g clip-path="url(#clip61)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3347.38 1770)">12</text></g><g clip-path="url(#clip62)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3518.39 1691)">27</text></g><g clip-path="url(#clip63)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3689.41 1550)">55</text></g><g clip-path="url(#clip64)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3870.88 1826)">1</text></g><g clip-path="url(#clip65)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4041.9 1800)">6</text></g><g clip-path="url(#clip66)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4202.46 1736)">19</text></g><g clip-path="url(#clip67)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4373.48 1649)">35</text></g><g clip-path="url(#clip68)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1183.29 1814)">3</text></g><g clip-path="url(#clip69)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1343.86 1764)">13</text></g><g clip-path="url(#clip70)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1514.88 1684)">29</text></g><g clip-path="url(#clip71)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1685.9 1540)">56</text></g><g clip-path="url(#clip72)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1867.36 1815)">3</text></g><g clip-path="url(#clip73)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2027.93 1782)">10</text></g><g clip-path="url(#clip74)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2198.95 1712)">23</text></g><g clip-path="url(#clip75)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2369.97 1545)">56</text></g><g clip-path="url(#clip76)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2551.44 1822)">2</text></g><g clip-path="url(#clip77)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2722.45 1803)">6</text></g><g clip-path="url(#clip78)"><text fill="#404040" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2883.02 1770)">12</text></g><g clip-path="url(#clip79)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3054.04 1716)">22</text></g><g clip-path="url(#clip80)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3235.51 1829)">1</text></g><g clip-path="url(#clip81)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3406.52 1818)">3</text></g><g clip-path="url(#clip82)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3577.54 1800)">6</text></g><g clip-path="url(#clip83)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3738.11 1762)">14</text></g><g clip-path="url(#clip84)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3919.58 1830)">0</text></g><g clip-path="url(#clip85)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4090.59 1821)">2</text></g><g clip-path="url(#clip86)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4261.61 1802)">6</text></g><g clip-path="url(#clip87)"><text fill="#404040" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4422.18 1766)">13</text></g><g clip-path="url(#clip88)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1024.72 1875)">0</text></g><g clip-path="url(#clip89)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1003.82 1617)">50</text></g><g clip-path="url(#clip90)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 982.919 1359)">100</text></g><g clip-path="url(#clip91)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 982.919 1101)">150</text></g><g clip-path="url(#clip92)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 982.919 842)">200</text></g><g clip-path="url(#clip93)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 982.919 584)">250</text></g><g clip-path="url(#clip94)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 982.919 326)">300</text></g><g clip-path="url(#clip95)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1148.49 1929)">48</text></g><g clip-path="url(#clip96)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1319.51 
1929)">24</text></g><g clip-path="url(#clip97)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1490.53 1929)">12</text></g><g clip-path="url(#clip98)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1672 1929)">6</text></g><g clip-path="url(#clip99)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1832.57 1929)">48</text></g><g clip-path="url(#clip100)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2003.58 1929)">24</text></g><g clip-path="url(#clip101)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2174.6 1929)">12</text></g><g clip-path="url(#clip102)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2356.07 1929)">6</text></g><g clip-path="url(#clip103)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2516.64 1929)">48</text></g><g clip-path="url(#clip104)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2687.65 1929)">24</text></g><g clip-path="url(#clip105)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2858.67 1929)">12</text></g><g clip-path="url(#clip106)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3040.14 1929)">6</text></g><g clip-path="url(#clip107)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3200.71 1929)">48</text></g><g clip-path="url(#clip108)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3371.72 1929)">24</text></g><g clip-path="url(#clip109)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3542.74 1929)">12</text></g><g clip-path="url(#clip110)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3724.21 1929)">6</text></g><g clip-path="url(#clip111)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3884.78 1929)">48</text></g><g clip-path="url(#clip112)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4055.8 1929)">24</text></g><g clip-path="url(#clip113)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4226.81 1929)">12</text></g><g clip-path="url(#clip114)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4408.28 1929)">6</text></g><g clip-path="url(#clip115)"><text fill="#595959" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1158.94 2009)">8</text></g><g clip-path="url(#clip116)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1329.96 2009)">4</text></g><g clip-path="url(#clip117)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1500.98 2009)">2</text></g><g clip-path="url(#clip118)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1672 2009)">1</text></g><g clip-path="url(#clip119)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1843.02 2009)">8</text></g><g clip-path="url(#clip120)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2014.03 2009)">4</text></g><g clip-path="url(#clip121)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2185.05 2009)">2</text></g><g clip-path="url(#clip122)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2356.07 2009)">1</text></g><g clip-path="url(#clip123)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2527.09 2009)">8</text></g><g clip-path="url(#clip124)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2698.1 2009)">4</text></g><g clip-path="url(#clip125)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2869.12 2009)">2</text></g><g clip-path="url(#clip126)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3040.14 2009)">1</text></g><g clip-path="url(#clip127)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3211.16 2009)">8</text></g><g clip-path="url(#clip128)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3382.17 2009)">4</text></g><g clip-path="url(#clip129)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3553.19 2009)">2</text></g><g clip-path="url(#clip130)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3724.21 2009)">1</text></g><g clip-path="url(#clip131)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3895.23 2009)">8</text></g><g clip-path="url(#clip132)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4066.25 2009)">4</text></g><g clip-path="url(#clip133)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 
4237.26 2009)">2</text></g><g clip-path="url(#clip134)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4408.28 2009)">1</text></g><g clip-path="url(#clip135)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1158.94 2089)">1</text></g><g clip-path="url(#clip136)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1329.96 2089)">2</text></g><g clip-path="url(#clip137)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1500.98 2089)">4</text></g><g clip-path="url(#clip138)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1672 2089)">8</text></g><g clip-path="url(#clip139)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1843.02 2089)">1</text></g><g clip-path="url(#clip140)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2014.03 2089)">2</text></g><g clip-path="url(#clip141)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2185.05 2089)">4</text></g><g clip-path="url(#clip142)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2356.07 2089)">8</text></g><g clip-path="url(#clip143)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2527.09 2089)">1</text></g><g clip-path="url(#clip144)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2698.1 2089)">2</text></g><g clip-path="url(#clip145)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2869.12 2089)">4</text></g><g clip-path="url(#clip146)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3040.14 2089)">8</text></g><g clip-path="url(#clip147)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3211.16 2089)">1</text></g><g clip-path="url(#clip148)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3382.17 2089)">2</text></g><g clip-path="url(#clip149)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3553.19 2089)">4</text></g><g clip-path="url(#clip150)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3724.21 2089)">8</text></g><g clip-path="url(#clip151)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3895.23 2089)">1</text></g><g clip-path="url(#clip152)"><text fill="#595959" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4066.25 2089)">2</text></g><g clip-path="url(#clip153)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4237.26 2089)">4</text></g><g clip-path="url(#clip154)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4408.28 2089)">8</text></g><g clip-path="url(#clip155)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1405.02 2169)">20</text></g><g clip-path="url(#clip156)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2089.09 2169)">32</text></g><g clip-path="url(#clip157)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2762.71 2169)">128</text></g><g clip-path="url(#clip158)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3446.78 2169)">384</text></g><g clip-path="url(#clip159)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4130.85 2169)">512</text></g><g clip-path="url(#clip160)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(6.12323e-17 -1 1 6.12323e-17 957.229 1497)">Sum of instance(s) throughput (inference/s)</text></g><g clip-path="url(#clip161)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2527.04 2235)">Number of core per instance</text></g><g clip-path="url(#clip162)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2581.51 2291)">Batch size per instance</text></g><g clip-path="url(#clip163)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2587.15 2347)">Number of instance(s)</text></g><g clip-path="url(#clip164)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="46" transform="matrix(1 0 0 1 2639.12 2402)">Sequence length</text></g><g clip-path="url(#clip165)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="64" transform="matrix(1 0 0 1 2122.27 240)">Throughput sum over instances with total batch size = 8</text></g><g clip-path="url(#clip166)"><rect x="4576" y="1318" width="23" height="22" fill="#C00000"/></g><g clip-path="url(#clip167)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4608.56 1341)">pytorch</text></g><g clip-path="url(#clip168)"><rect x="4576" y="1395" width="23" height="23" fill="url(#fill169)"/></g><g clip-path="url(#clip170)"><text fill="#595959" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4608.56 1418)">tensorflow</text></g><rect x="856.5" y="144.5" width="3981" height="2328" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" 
stroke-miterlimit="10" fill="none"/></g></svg>
7
0
hf_public_repos/blog/assets
hf_public_repos/blog/assets/117_vq_diffusion/vq_diffusion_architecture.svg
<?xml version="1.0" encoding="UTF-8"?> <!-- Do not edit this file with editors other than diagrams.net --> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"> <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="2058px" height="993px" viewBox="-0.5 -0.5 2058 993" style="background-color: rgb(255, 255, 255);" content="&lt;mxfile host=&quot;app.diagrams.net&quot; modified=&quot;2022-11-22T20:12:49.317Z&quot; agent=&quot;5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36&quot; etag=&quot;SFK3g_a0rMPzCQ28DngY&quot; version=&quot;20.5.3&quot; type=&quot;google&quot;&gt;&lt;diagram id=&quot;9964fbe8-7852-8ceb-564f-9d63d80ee8c5&quot; name=&quot;Page-1&quot;&gt;3Zxbc6M2FIB/jWe2D/YghMB+jO1ku9vtNJ20201fMtjINrMYvIATe399JZAwumBjDInTznYMBxCgc/Sdi0R6cLLefYzdzer3yMNBzzS8XQ9Oe6YJAYDkh0r2uQQAy8kly9j3mOwgePB/YiY0mHTrezgRTkyjKEj9jSicR2GI56kgc+M4ehFPW0SBeNeNu8SK4GHuBqr0H99LV7l0iIyD/FfsL1f8zsBgR2bu/PsyjrYhu1/PhIvsv/zw2uVtsTskK9eLXnJR1gS87cFJHEVpvrXeTXBAO5d3W36fu4qjxXPHOEzrXGDmFzy7wZa9+q5n2gG5dpxsZ/QB0z3rFPvHlj7VeBGFaT/JVHZDTjDRhuh9fDhOtpb01+ANkbtnbeVi9t5FsyZ5IKJWsjN+Wfkpfti4c3rkhZgWka3SdUD2ANl0k02u64W/wx57EmY7JiqaLr88649nHKd4VxKxzviIozVO4z05hR0dMj1yw7XY/kvJCphoVTIALnOZ3S2Lhg99TzZY9+tVAbtTRdoH71IZ4O20YXWojfeoCwDs4cBBb6YPR9EH70X6snX0AY0KfQwGg5JG8uYqVEI6KxX7Pknj6DueREEUE0kYhVRfCz8IJJEb+MuQ7M6JCjCRj2nX+8Tj3LADa9/zgiplZx6FqnZqiOqFRjvqtSxpqNmqaoGp0a3Vgm75MBeUKw+G0Luhjp12YeAmiT8X9UBePd5/ox00QHz3kfUX3vnpN34e2eby+TZ+zjoVHOvEJNrGcyy4y9SNlzgVDBN7QkShdnSpI5GmH7ksxoGb+s9iHKLrXHaH+8jPzF/vv8yRpJ78ZdhF5bhAagdAqSFZz3kXKA0RHbn70mkbekJS/bxQus9QCFfIRt7gwYyKHq1lWd9/POHHr9+cz4+W8+PzchakD1/6UGX7uSypZHuPOg7yz/jxYfdEtGf0nAl92yeDbtPjRg9N3GCzcqloku/PcFreXbrrdbZv/EIFrMl6hKJ2+MWdkYBcdBG18RNj8pruLGuPPjHTIGkcjXtoWs+7MKWy8Jy11uPB6Blc6hsD4AwtwUb6YNTOWBFbtaQGosUiwalkjucaIAAaWzsXbQxfjGwlgB2gJyPvfLQ5KtrgVaENOiIqoNmQbZYpsc3phm02egO2IY29lZDUc7JkwJmW0JTJTCYTEVU6vYwqVZwjq9x6GV1XySixhHAhoyAEIk1M1IrRc1wUkJJMtR1IqVWAxpCSY6w2EQVVRFlXhShUlbCeiyhkiw3JYVxLhAIG1N6nLUQBtaah0EjDIlDJIi2J9Bw6SSE1lzuNlfqICijfxkU1kGeDh3pgLUbxgSmzqApbxxkF7G4YJaEPdRJIqUH7pzWt4prGX7EbJosoXhONmMYUzyOPbknqTlbuhm5u4miOk+R0VeVQyv1jmwY+zeIzeSfFFaAUVyw+NMsZuK66IrOhUQauhg13cfQTh0Q2+fLpnvYyHS6mcRu+w+5FI0PqW3OA1N4daXp3iFroXbs9/2qU/euh1FG43sLDDo52XtmtcuWX/Sq4LscqlTUs1NCvymUNy6jnWJsAS1uxdNd0GISzhP7sMydH6wkfZm46X1G9UMvPnJnj5L8ImL+8S9/Fjf5i32UMDAihoDfQjlVJ7tDpwm8NWxz7QgHgVNVTrBnXIoF19SCQA+PGBU5bCtUvL3BeYiQjxUhanW0SqFMx+0RbEU/UQCncrmdZlEWUiLPu3Pg7HCTvk1B8aLYRXRtiom62Yu32SCIU7IBQRcHjYHwP8xX2tgHVdG4rs/giGyxseeOGFzbEssfNE0n20hVJASuKWmlusPssAYTTPDtMtmt6Mr3UDzxMNnfkqicjs1ByObolot94Qyz7XMTunOzQfJUklZPS7WjbVUU1knmS/ayN/Er5kPAO6hPp3oRe1ZY+JCSwKYZKUGSKq5hyuObQ35GKxsWSgvKUtePwbKDssYoK4SWhf/HEr+7+s53pTtjblxKIwyG6w4/UChOAJk7gFLmSOEFKGFDTQpySMNSsxDXhMC/2lEyFRjsi6/RkIDLbOJZNFI77mbi1iG5gIvA8P1yS4MXok//BERcf4Pfp4iuB0cjFt1Mus8VymWN24dN1hddzsVMBF6DkFqdQU1G3uAxCHDjXO2E5VOb/XhlCZ09YSs5yxHL4yhxKOt8SZzhPdojV8oyoZtEeweIZoWJNhv4fkh8+Vi6f/iRgHPHl3a2WZ0ROAmPUASi1k+hmC/VaDSEbBGqVJBRKNOiqsAfsobRMtPEiNMMQgaGsJG0v+tIbglrDvY+j9Sb9oBn0V7Gq4ag9t7LUAVmmWJVoZ7D3pUnEof1qo10tvn39s//15pbIvBOTiSnJbfHPiLY/3uDYJ89CdXyQ3x+Ep7LibPE2025H2XBfiSS4CZSnwXRskJdINcmF9QuWNFnPhaytTpcviziPWs+1wFciJmq6SK6PRIjbNevjrS1kUzOYxF1vAnw6PIuXsyOx2fVimg+FNkIyy+YB3qVclsygi7Ud+t7oaNKcyoWlZ9VUOJYBN4raTixmu65PCaAcfDVNX01p9YUpM6mj9JU7rW4X3Oqm+SsnKcyK1W6vtVgWtMqrNsPKzlbQ9sWVA3D4agBT48rmAFNimXMRBloBmFDx19JK81HB2wEMDByAHHPoIBNB/sAFzoYDc2iZDjAtx7IdKd+onaOiE5Rs60Mpeb3Ca8CNx3wn4VaelQDvE2ajdwEzIObeVhdLlqb3d1PQv/m0gOjvO2e4Wu
DHf/u62upleZr45ayHF+62+AavrMDmMwgKms7/uvNIzvdmS6CkMAfBhnGZLX0HZdWsrp2LLjmVBC2X/fX2qq5o7vwvW1w+cfBmyWntCVJxEB1BxZHkFEHpQ4SWUlX+uUThIi+HY4+ug+d/jCU//fAnb+Dtfw==&lt;/diagram&gt;&lt;/mxfile&gt;"><defs/><g><ellipse cx="397" cy="150" rx="40" ry="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 150px; margin-left: 358px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">x<sub style="font-size: 25px;">0</sub></div></div></div></foreignObject><text x="397" y="158" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">x0</text></switch></g><ellipse cx="1127" cy="150" rx="40" ry="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 150px; margin-left: 1088px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">x<sub style="font-size: 25px;">t-1</sub></div></div></div></foreignObject><text x="1127" y="158" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">xt-1</text></switch></g><ellipse cx="1485.75" cy="150" rx="40" ry="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 150px; margin-left: 1447px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">x<sub style="font-size: 25px;">t</sub></div></div></div></foreignObject><text x="1486" y="158" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">xt</text></switch></g><rect x="717" y="130" width="120" height="40" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" 
style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 150px; margin-left: 718px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 30px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font style="font-size: 30px;">...</font></div></div></div></foreignObject><text x="777" y="159" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="30px" text-anchor="middle">...</text></switch></g><path d="M 425.28 121.72 Q 607 50 771.24 127.29" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 775.99 129.52 L 768.16 129.71 L 771.24 127.29 L 771.14 123.38 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 41px; margin-left: 587px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"><font style="font-size: 25px;"><mjx-container class="MathJax" jax="SVG" display="true"><svg xmlns="http://www.w3.org/2000/svg" width="17.875ex" height="2.262ex" role="img" focusable="false" viewBox="0 -750 7900.8 1000" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -0.566ex;"><defs><path id="MJX-1152-TEX-I-1D45E" d="M33 157Q33 258 109 349T280 441Q340 441 372 389Q373 390 377 395T388 406T404 418Q438 442 450 442Q454 442 457 439T460 434Q460 425 391 149Q320 -135 320 -139Q320 -147 365 -148H390Q396 -156 396 -157T393 -175Q389 -188 383 -194H370Q339 -192 262 -192Q234 -192 211 -192T174 -192T157 -193Q143 -193 143 -185Q143 -182 145 -170Q149 -154 152 -151T172 -148Q220 -148 230 -141Q238 -136 258 -53T279 32Q279 33 272 29Q224 -10 172 -10Q117 -10 75 30T33 157ZM352 326Q329 405 277 405Q242 405 210 374T160 293Q131 214 119 129Q119 126 119 118T118 106Q118 61 136 44T179 26Q233 26 290 98L298 109L352 326Z"/><path id="MJX-1152-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1152-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 
139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1152-TEX-N-31" d="M213 578L200 573Q186 568 160 563T102 556H83V602H102Q149 604 189 617T245 641T273 663Q275 666 285 666Q294 666 302 660V361L303 61Q310 54 315 52T339 48T401 46H427V0H416Q395 3 257 3Q121 3 100 0H88V46H114Q136 46 152 46T177 47T193 50T201 52T207 57T213 61V578Z"/><path id="MJX-1152-TEX-N-7C" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1152-TEX-N-30" d="M96 585Q152 666 249 666Q297 666 345 640T423 548Q460 465 460 320Q460 165 417 83Q397 41 362 16T301 -15T250 -22Q224 -22 198 -16T137 16T82 83Q39 165 39 320Q39 494 96 585ZM321 597Q291 629 250 629Q208 629 178 597Q153 571 145 525T137 333Q137 175 145 125T181 46Q209 16 250 16Q290 16 318 46Q347 76 354 130T362 333Q362 478 354 524T321 597Z"/><path id="MJX-1152-TEX-N-3B" d="M78 370Q78 394 95 412T138 430Q162 430 180 414T199 371Q199 346 182 328T139 310T96 327T78 370ZM78 60Q78 85 94 103T137 121Q202 121 202 8Q202 -44 183 -94T144 -169T118 -194Q115 -194 106 -186T95 -174Q94 -171 107 -155T137 -107T160 -38Q161 -32 162 -22T165 -4T165 4Q165 5 161 4T142 0Q110 0 94 18T78 60Z"/><path id="MJX-1152-TEX-I-1D6FC" d="M34 156Q34 270 120 356T309 442Q379 442 421 402T478 304Q484 275 485 237V208Q534 282 560 374Q564 388 566 390T582 393Q603 393 603 385Q603 376 594 346T558 261T497 161L486 147L487 123Q489 67 495 47T514 26Q528 28 540 37T557 60Q559 67 562 68T577 70Q597 70 597 62Q597 56 591 43Q579 19 556 5T512 -10H505Q438 -10 414 62L411 69L400 61Q390 53 370 41T325 18T267 -2T203 -11Q124 -11 79 39T34 156ZM208 26Q257 26 306 47T379 90L403 112Q401 255 396 290Q382 405 304 405Q235 405 183 332Q156 292 139 224T121 120Q121 71 146 49T208 26Z"/><path id="MJX-1152-TEX-N-2C" d="M78 35T78 60T94 103T137 121Q165 121 187 96T210 8Q210 -27 201 -60T180 -117T154 -158T130 -185T117 -194Q113 -194 104 -185T95 -172Q95 -168 106 -156T131 -126T157 -76T173 -3V9L172 8Q170 7 167 6T161 3T152 1T140 0Q113 0 96 17Z"/><path id="MJX-1152-TEX-I-1D6FD" d="M29 -194Q23 -188 23 -186Q23 -183 102 134T186 465Q208 533 243 584T309 658Q365 705 429 705H431Q493 705 533 667T573 570Q573 465 469 396L482 383Q533 332 533 252Q533 139 448 65T257 -10Q227 -10 203 -2T165 17T143 40T131 59T126 65L62 -188Q60 -194 42 -194H29ZM353 431Q392 431 427 419L432 422Q436 426 439 429T449 439T461 453T472 471T484 495T493 524T501 560Q503 569 503 593Q503 611 502 616Q487 667 426 667Q384 667 347 643T286 582T247 514T224 455Q219 439 186 308T152 168Q151 163 151 147Q151 99 173 68Q204 26 260 26Q302 26 349 51T425 137Q441 171 449 214T457 279Q457 337 422 372Q380 358 347 358H337Q258 358 258 389Q258 396 261 403Q275 431 353 431Z"/><path id="MJX-1152-TEX-I-1D6FE" d="M31 249Q11 249 11 258Q11 275 26 304T66 365T129 418T206 441Q233 441 239 440Q287 429 318 386T371 255Q385 195 385 170Q385 166 386 166L398 193Q418 244 443 300T486 391T508 430Q510 431 524 431H537Q543 425 543 422Q543 418 522 378T463 251T391 71Q385 55 378 6T357 -100Q341 -165 330 -190T303 -216Q286 -216 286 -188Q286 -138 340 32L346 51L347 69Q348 79 348 100Q348 257 291 317Q251 355 196 355Q148 355 108 329T51 260Q49 251 47 251Q45 249 31 249Z"/><path id="MJX-1152-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" 
transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="mi"><use data-c="1D45E" xlink:href="#MJX-1152-TEX-I-1D45E"/></g><g data-mml-node="mo" transform="translate(460,0)"><use data-c="28" xlink:href="#MJX-1152-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(849,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1152-TEX-I-1D465"/></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="31" xlink:href="#MJX-1152-TEX-N-31"/></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(1857.6,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1152-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(2135.6,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1152-TEX-I-1D465"/></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1152-TEX-N-30"/></g></g><g data-mml-node="mo" transform="translate(3144.1,0)"><use data-c="3B" xlink:href="#MJX-1152-TEX-N-3B"/></g><g data-mml-node="msub" transform="translate(3588.8,0)"><g data-mml-node="mi"><use data-c="1D6FC" xlink:href="#MJX-1152-TEX-I-1D6FC"/></g><g data-mml-node="mn" transform="translate(673,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1152-TEX-N-30"/></g></g><g data-mml-node="mo" transform="translate(4665.3,0)"><use data-c="2C" xlink:href="#MJX-1152-TEX-N-2C"/></g><g data-mml-node="msub" transform="translate(5110,0)"><g data-mml-node="mi"><use data-c="1D6FD" xlink:href="#MJX-1152-TEX-I-1D6FD"/></g><g data-mml-node="mn" transform="translate(599,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1152-TEX-N-30"/></g></g><g data-mml-node="mo" transform="translate(6112.5,0)"><use data-c="2C" xlink:href="#MJX-1152-TEX-N-2C"/></g><g data-mml-node="msub" transform="translate(6557.2,0)"><g data-mml-node="mi"><use data-c="1D6FE" xlink:href="#MJX-1152-TEX-I-1D6FE"/></g><g data-mml-node="mn" transform="translate(551,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1152-TEX-N-30"/></g></g><g data-mml-node="mo" transform="translate(7511.8,0)"><use data-c="29" xlink:href="#MJX-1152-TEX-N-29"/></g></g></g></svg></mjx-container></font></div></div></div></foreignObject><text x="587" y="49" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">$$ q(x_1 | x_0 ; \alpha_0, \beta_0, \gamma_0 ) $$</text></switch></g><path d="M 777 130 Q 927 50 1092.84 119.26" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1097.68 121.28 L 1089.88 121.82 L 1092.84 119.26 L 1092.57 115.36 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 40px; margin-left: 948px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"><mjx-container class="MathJax" jax="SVG" display="true"><svg 
xmlns="http://www.w3.org/2000/svg" width="26.986ex" height="2.262ex" role="img" focusable="false" viewBox="0 -750 11927.7 1000" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -0.566ex;"><defs><path id="MJX-1153-TEX-I-1D45E" d="M33 157Q33 258 109 349T280 441Q340 441 372 389Q373 390 377 395T388 406T404 418Q438 442 450 442Q454 442 457 439T460 434Q460 425 391 149Q320 -135 320 -139Q320 -147 365 -148H390Q396 -156 396 -157T393 -175Q389 -188 383 -194H370Q339 -192 262 -192Q234 -192 211 -192T174 -192T157 -193Q143 -193 143 -185Q143 -182 145 -170Q149 -154 152 -151T172 -148Q220 -148 230 -141Q238 -136 258 -53T279 32Q279 33 272 29Q224 -10 172 -10Q117 -10 75 30T33 157ZM352 326Q329 405 277 405Q242 405 210 374T160 293Q131 214 119 129Q119 126 119 118T118 106Q118 61 136 44T179 26Q233 26 290 98L298 109L352 326Z"/><path id="MJX-1153-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1153-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1153-TEX-I-1D461" d="M26 385Q19 392 19 395Q19 399 22 411T27 425Q29 430 36 430T87 431H140L159 511Q162 522 166 540T173 566T179 586T187 603T197 615T211 624T229 626Q247 625 254 615T261 596Q261 589 252 549T232 470L222 433Q222 431 272 431H323Q330 424 330 420Q330 398 317 385H210L174 240Q135 80 135 68Q135 26 162 26Q197 26 230 60T283 144Q285 150 288 151T303 153H307Q322 153 322 145Q322 142 319 133Q314 117 301 95T267 48T216 6T155 -11Q125 -11 98 4T59 56Q57 64 57 83V101L92 241Q127 382 128 383Q128 385 77 385H26Z"/><path id="MJX-1153-TEX-N-2212" d="M84 237T84 250T98 270H679Q694 262 694 250T679 230H98Q84 237 84 250Z"/><path id="MJX-1153-TEX-N-31" d="M213 578L200 573Q186 568 160 563T102 556H83V602H102Q149 604 189 617T245 641T273 663Q275 666 285 666Q294 666 302 660V361L303 61Q310 54 315 52T339 48T401 46H427V0H416Q395 3 257 3Q121 3 100 0H88V46H114Q136 46 152 46T177 47T193 50T201 52T207 57T213 61V578Z"/><path id="MJX-1153-TEX-N-7C" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1153-TEX-N-32" d="M109 429Q82 429 66 447T50 491Q50 562 103 614T235 666Q326 666 387 610T449 465Q449 422 429 383T381 315T301 241Q265 210 201 149L142 93L218 92Q375 92 385 97Q392 99 409 186V189H449V186Q448 183 436 95T421 3V0H50V19V31Q50 38 56 46T86 81Q115 113 136 137Q145 147 170 174T204 211T233 244T261 278T284 308T305 340T320 369T333 401T340 431T343 464Q343 527 309 573T212 619Q179 619 154 602T119 569T109 550Q109 549 114 549Q132 549 151 535T170 489Q170 464 154 447T109 429Z"/><path id="MJX-1153-TEX-N-3B" d="M78 370Q78 394 95 412T138 430Q162 430 180 414T199 371Q199 346 182 328T139 310T96 327T78 370ZM78 60Q78 85 94 103T137 121Q202 121 202 8Q202 -44 183 -94T144 -169T118 -194Q115 -194 106 -186T95 
-174Q94 -171 107 -155T137 -107T160 -38Q161 -32 162 -22T165 -4T165 4Q165 5 161 4T142 0Q110 0 94 18T78 60Z"/><path id="MJX-1153-TEX-I-1D6FC" d="M34 156Q34 270 120 356T309 442Q379 442 421 402T478 304Q484 275 485 237V208Q534 282 560 374Q564 388 566 390T582 393Q603 393 603 385Q603 376 594 346T558 261T497 161L486 147L487 123Q489 67 495 47T514 26Q528 28 540 37T557 60Q559 67 562 68T577 70Q597 70 597 62Q597 56 591 43Q579 19 556 5T512 -10H505Q438 -10 414 62L411 69L400 61Q390 53 370 41T325 18T267 -2T203 -11Q124 -11 79 39T34 156ZM208 26Q257 26 306 47T379 90L403 112Q401 255 396 290Q382 405 304 405Q235 405 183 332Q156 292 139 224T121 120Q121 71 146 49T208 26Z"/><path id="MJX-1153-TEX-N-2C" d="M78 35T78 60T94 103T137 121Q165 121 187 96T210 8Q210 -27 201 -60T180 -117T154 -158T130 -185T117 -194Q113 -194 104 -185T95 -172Q95 -168 106 -156T131 -126T157 -76T173 -3V9L172 8Q170 7 167 6T161 3T152 1T140 0Q113 0 96 17Z"/><path id="MJX-1153-TEX-I-1D6FD" d="M29 -194Q23 -188 23 -186Q23 -183 102 134T186 465Q208 533 243 584T309 658Q365 705 429 705H431Q493 705 533 667T573 570Q573 465 469 396L482 383Q533 332 533 252Q533 139 448 65T257 -10Q227 -10 203 -2T165 17T143 40T131 59T126 65L62 -188Q60 -194 42 -194H29ZM353 431Q392 431 427 419L432 422Q436 426 439 429T449 439T461 453T472 471T484 495T493 524T501 560Q503 569 503 593Q503 611 502 616Q487 667 426 667Q384 667 347 643T286 582T247 514T224 455Q219 439 186 308T152 168Q151 163 151 147Q151 99 173 68Q204 26 260 26Q302 26 349 51T425 137Q441 171 449 214T457 279Q457 337 422 372Q380 358 347 358H337Q258 358 258 389Q258 396 261 403Q275 431 353 431Z"/><path id="MJX-1153-TEX-I-1D6FE" d="M31 249Q11 249 11 258Q11 275 26 304T66 365T129 418T206 441Q233 441 239 440Q287 429 318 386T371 255Q385 195 385 170Q385 166 386 166L398 193Q418 244 443 300T486 391T508 430Q510 431 524 431H537Q543 425 543 422Q543 418 522 378T463 251T391 71Q385 55 378 6T357 -100Q341 -165 330 -190T303 -216Q286 -216 286 -188Q286 -138 340 32L346 51L347 69Q348 79 348 100Q348 257 291 317Q251 355 196 355Q148 355 108 329T51 260Q49 251 47 251Q45 249 31 249Z"/><path id="MJX-1153-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="mi"><use data-c="1D45E" xlink:href="#MJX-1153-TEX-I-1D45E"/></g><g data-mml-node="mo" transform="translate(460,0)"><use data-c="28" xlink:href="#MJX-1153-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(849,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1153-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1153-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1153-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1153-TEX-N-31"/></g></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(2662.9,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1153-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(2940.9,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1153-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" 
transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1153-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1153-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="32" xlink:href="#MJX-1153-TEX-N-32"/></g></g></g><g data-mml-node="mo" transform="translate(4754.9,0)"><use data-c="3B" xlink:href="#MJX-1153-TEX-N-3B"/></g><g data-mml-node="msub" transform="translate(5199.6,0)"><g data-mml-node="mi"><use data-c="1D6FC" xlink:href="#MJX-1153-TEX-I-1D6FC"/></g><g data-mml-node="TeXAtom" transform="translate(673,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1153-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1153-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1153-TEX-N-31"/></g></g></g><g data-mml-node="mo" transform="translate(7081.5,0)"><use data-c="2C" xlink:href="#MJX-1153-TEX-N-2C"/></g><g data-mml-node="msub" transform="translate(7526.2,0)"><g data-mml-node="mi"><use data-c="1D6FD" xlink:href="#MJX-1153-TEX-I-1D6FD"/></g><g data-mml-node="TeXAtom" transform="translate(599,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1153-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1153-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1153-TEX-N-31"/></g></g></g><g data-mml-node="mo" transform="translate(9334.1,0)"><use data-c="2C" xlink:href="#MJX-1153-TEX-N-2C"/></g><g data-mml-node="msub" transform="translate(9778.8,0)"><g data-mml-node="mi"><use data-c="1D6FE" xlink:href="#MJX-1153-TEX-I-1D6FE"/></g><g data-mml-node="TeXAtom" transform="translate(551,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1153-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1153-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1153-TEX-N-31"/></g></g></g><g data-mml-node="mo" transform="translate(11538.7,0)"><use data-c="29" xlink:href="#MJX-1153-TEX-N-29"/></g></g></g></svg></mjx-container></div></div></div></foreignObject><text x="948" y="47" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">$$ q(x_{t-1} | x_{t-2} ; \alpha_{t-1}, \beta_{t-1}, \gamma_{t-1} ) $$</text></switch></g><path d="M 1155.28 121.72 Q 1307 60 1451.57 119.3" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1456.43 121.29 L 1448.63 121.87 L 1451.57 119.3 L 1451.28 115.4 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 41px; margin-left: 1317px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: 
inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"><mjx-container class="MathJax" jax="SVG" display="true"><svg xmlns="http://www.w3.org/2000/svg" width="18.808ex" height="2.262ex" role="img" focusable="false" viewBox="0 -750 8313 1000" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -0.566ex;"><defs><path id="MJX-1154-TEX-I-1D45E" d="M33 157Q33 258 109 349T280 441Q340 441 372 389Q373 390 377 395T388 406T404 418Q438 442 450 442Q454 442 457 439T460 434Q460 425 391 149Q320 -135 320 -139Q320 -147 365 -148H390Q396 -156 396 -157T393 -175Q389 -188 383 -194H370Q339 -192 262 -192Q234 -192 211 -192T174 -192T157 -193Q143 -193 143 -185Q143 -182 145 -170Q149 -154 152 -151T172 -148Q220 -148 230 -141Q238 -136 258 -53T279 32Q279 33 272 29Q224 -10 172 -10Q117 -10 75 30T33 157ZM352 326Q329 405 277 405Q242 405 210 374T160 293Q131 214 119 129Q119 126 119 118T118 106Q118 61 136 44T179 26Q233 26 290 98L298 109L352 326Z"/><path id="MJX-1154-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1154-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1154-TEX-I-1D461" d="M26 385Q19 392 19 395Q19 399 22 411T27 425Q29 430 36 430T87 431H140L159 511Q162 522 166 540T173 566T179 586T187 603T197 615T211 624T229 626Q247 625 254 615T261 596Q261 589 252 549T232 470L222 433Q222 431 272 431H323Q330 424 330 420Q330 398 317 385H210L174 240Q135 80 135 68Q135 26 162 26Q197 26 230 60T283 144Q285 150 288 151T303 153H307Q322 153 322 145Q322 142 319 133Q314 117 301 95T267 48T216 6T155 -11Q125 -11 98 4T59 56Q57 64 57 83V101L92 241Q127 382 128 383Q128 385 77 385H26Z"/><path id="MJX-1154-TEX-N-7C" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1154-TEX-N-2212" d="M84 237T84 250T98 270H679Q694 262 694 250T679 230H98Q84 237 84 250Z"/><path id="MJX-1154-TEX-N-31" d="M213 578L200 573Q186 568 160 563T102 556H83V602H102Q149 604 189 617T245 641T273 663Q275 666 285 666Q294 666 302 660V361L303 61Q310 54 315 52T339 48T401 46H427V0H416Q395 3 257 3Q121 3 100 0H88V46H114Q136 46 152 46T177 47T193 50T201 52T207 57T213 61V578Z"/><path id="MJX-1154-TEX-N-3B" d="M78 370Q78 394 95 412T138 430Q162 430 180 414T199 371Q199 346 182 328T139 310T96 327T78 370ZM78 60Q78 85 94 103T137 121Q202 121 202 8Q202 -44 183 -94T144 -169T118 -194Q115 -194 106 -186T95 -174Q94 -171 107 -155T137 -107T160 -38Q161 -32 162 -22T165 -4T165 4Q165 5 161 4T142 0Q110 0 94 18T78 60Z"/><path id="MJX-1154-TEX-I-1D6FC" d="M34 156Q34 270 120 356T309 442Q379 442 421 402T478 304Q484 275 485 237V208Q534 282 560 374Q564 388 566 390T582 393Q603 
393 603 385Q603 376 594 346T558 261T497 161L486 147L487 123Q489 67 495 47T514 26Q528 28 540 37T557 60Q559 67 562 68T577 70Q597 70 597 62Q597 56 591 43Q579 19 556 5T512 -10H505Q438 -10 414 62L411 69L400 61Q390 53 370 41T325 18T267 -2T203 -11Q124 -11 79 39T34 156ZM208 26Q257 26 306 47T379 90L403 112Q401 255 396 290Q382 405 304 405Q235 405 183 332Q156 292 139 224T121 120Q121 71 146 49T208 26Z"/><path id="MJX-1154-TEX-N-2C" d="M78 35T78 60T94 103T137 121Q165 121 187 96T210 8Q210 -27 201 -60T180 -117T154 -158T130 -185T117 -194Q113 -194 104 -185T95 -172Q95 -168 106 -156T131 -126T157 -76T173 -3V9L172 8Q170 7 167 6T161 3T152 1T140 0Q113 0 96 17Z"/><path id="MJX-1154-TEX-I-1D6FD" d="M29 -194Q23 -188 23 -186Q23 -183 102 134T186 465Q208 533 243 584T309 658Q365 705 429 705H431Q493 705 533 667T573 570Q573 465 469 396L482 383Q533 332 533 252Q533 139 448 65T257 -10Q227 -10 203 -2T165 17T143 40T131 59T126 65L62 -188Q60 -194 42 -194H29ZM353 431Q392 431 427 419L432 422Q436 426 439 429T449 439T461 453T472 471T484 495T493 524T501 560Q503 569 503 593Q503 611 502 616Q487 667 426 667Q384 667 347 643T286 582T247 514T224 455Q219 439 186 308T152 168Q151 163 151 147Q151 99 173 68Q204 26 260 26Q302 26 349 51T425 137Q441 171 449 214T457 279Q457 337 422 372Q380 358 347 358H337Q258 358 258 389Q258 396 261 403Q275 431 353 431Z"/><path id="MJX-1154-TEX-I-1D6FE" d="M31 249Q11 249 11 258Q11 275 26 304T66 365T129 418T206 441Q233 441 239 440Q287 429 318 386T371 255Q385 195 385 170Q385 166 386 166L398 193Q418 244 443 300T486 391T508 430Q510 431 524 431H537Q543 425 543 422Q543 418 522 378T463 251T391 71Q385 55 378 6T357 -100Q341 -165 330 -190T303 -216Q286 -216 286 -188Q286 -138 340 32L346 51L347 69Q348 79 348 100Q348 257 291 317Q251 355 196 355Q148 355 108 329T51 260Q49 251 47 251Q45 249 31 249Z"/><path id="MJX-1154-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="mi"><use data-c="1D45E" xlink:href="#MJX-1154-TEX-I-1D45E"/></g><g data-mml-node="mo" transform="translate(460,0)"><use data-c="28" xlink:href="#MJX-1154-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(849,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1154-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1154-TEX-I-1D461"/></g></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(1759.3,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1154-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(2037.3,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1154-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1154-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1154-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1154-TEX-N-31"/></g></g></g><g data-mml-node="mo" transform="translate(3851.2,0)"><use data-c="3B" xlink:href="#MJX-1154-TEX-N-3B"/></g><g 
data-mml-node="msub" transform="translate(4295.9,0)"><g data-mml-node="mi"><use data-c="1D6FC" xlink:href="#MJX-1154-TEX-I-1D6FC"/></g><g data-mml-node="TeXAtom" transform="translate(673,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1154-TEX-I-1D461"/></g></g></g><g data-mml-node="mo" transform="translate(5274.1,0)"><use data-c="2C" xlink:href="#MJX-1154-TEX-N-2C"/></g><g data-mml-node="msub" transform="translate(5718.8,0)"><g data-mml-node="mi"><use data-c="1D6FD" xlink:href="#MJX-1154-TEX-I-1D6FD"/></g><g data-mml-node="TeXAtom" transform="translate(599,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1154-TEX-I-1D461"/></g></g></g><g data-mml-node="mo" transform="translate(6623.1,0)"><use data-c="2C" xlink:href="#MJX-1154-TEX-N-2C"/></g><g data-mml-node="msub" transform="translate(7067.7,0)"><g data-mml-node="mi"><use data-c="1D6FE" xlink:href="#MJX-1154-TEX-I-1D6FE"/></g><g data-mml-node="TeXAtom" transform="translate(551,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1154-TEX-I-1D461"/></g></g></g><g data-mml-node="mo" transform="translate(7924,0)"><use data-c="29" xlink:href="#MJX-1154-TEX-N-29"/></g></g></g></svg></mjx-container></div></div></div></foreignObject><text x="1317" y="49" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">$$ q(x_{t} | x_{t-1} ; \alpha_{t}, \beta_{t}, \gamma_{t} ) $$</text></switch></g><rect x="1395.75" y="400" width="180" height="90" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><path d="M 1413.75 400 L 1413.75 490 M 1557.75 400 L 1557.75 490" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 142px; height: 1px; padding-top: 445px; margin-left: 1415px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Image Transformer Decoder</div></div></div></foreignObject><text x="1486" y="453" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">Image Transf...</text></switch></g><rect x="1867" y="402.5" width="190" height="85" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><path d="M 1886 402.5 L 1886 487.5 M 2038 402.5 L 2038 487.5" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 149px; height: 1px; padding-top: 445px; margin-left: 1887px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 
25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Frozen CLIP Text Encoder</div></div></div></foreignObject><text x="1961" y="453" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">Frozen CLIP...</text></switch></g><path d="M 1867 445 L 1582.12 445" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1576.87 445 L 1583.87 441.5 L 1582.12 445 L 1583.87 448.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 420px; margin-left: 1725px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"> y : (batch size, 77, 512)</div></div></div></foreignObject><text x="1725" y="427" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle"> y : (batch size, 77, 512)</text></switch></g><path d="M 1485.75 190 L 1485.75 393.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1485.75 398.88 L 1482.25 391.88 L 1485.75 393.63 L 1489.25 391.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 270px; margin-left: 1557px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;">x<sub style="font-size: 25px;">t </sub>: (batch size, number latent pixels)</div></div></div></foreignObject><text x="1557" y="278" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">xt : (batch size, number latent pixels)</text></switch></g><rect x="1047" y="780" width="877.5" height="211" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><path d="M 1135 780 L 1135 991 M 1836.5 780 L 1836.5 991" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 700px; height: 1px; padding-top: 886px; margin-left: 
1136px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Scheduler<br style="font-size: 25px;" /><span style="font-size: 25px;"><mjx-container class="MathJax" jax="SVG" display="true"><svg xmlns="http://www.w3.org/2000/svg" width="51.121ex" height="7.008ex" role="img" focusable="false" viewBox="0 -1733 22595.4 3097.4" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -3.087ex;"><defs><path id="MJX-1155-TEX-I-1D45D" d="M23 287Q24 290 25 295T30 317T40 348T55 381T75 411T101 433T134 442Q209 442 230 378L240 387Q302 442 358 442Q423 442 460 395T497 281Q497 173 421 82T249 -10Q227 -10 210 -4Q199 1 187 11T168 28L161 36Q160 35 139 -51T118 -138Q118 -144 126 -145T163 -148H188Q194 -155 194 -157T191 -175Q188 -187 185 -190T172 -194Q170 -194 161 -194T127 -193T65 -192Q-5 -192 -24 -194H-32Q-39 -187 -39 -183Q-37 -156 -26 -148H-6Q28 -147 33 -136Q36 -130 94 103T155 350Q156 355 156 364Q156 405 131 405Q109 405 94 377T71 316T59 280Q57 278 43 278H29Q23 284 23 287ZM178 102Q200 26 252 26Q282 26 310 49T356 107Q374 141 392 215T411 325V331Q411 405 350 405Q339 405 328 402T306 393T286 380T269 365T254 350T243 336T235 326L232 322Q232 321 229 308T218 264T204 212Q178 106 178 102Z"/><path id="MJX-1155-TEX-I-1D703" d="M35 200Q35 302 74 415T180 610T319 704Q320 704 327 704T339 705Q393 701 423 656Q462 596 462 495Q462 380 417 261T302 66T168 -10H161Q125 -10 99 10T60 63T41 130T35 200ZM383 566Q383 668 330 668Q294 668 260 623T204 521T170 421T157 371Q206 370 254 370L351 371Q352 372 359 404T375 484T383 566ZM113 132Q113 26 166 26Q181 26 198 36T239 74T287 161T335 307L340 324H145Q145 321 136 286T120 208T113 132Z"/><path id="MJX-1155-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1155-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1155-TEX-I-1D461" d="M26 385Q19 392 19 395Q19 399 22 411T27 425Q29 430 36 430T87 431H140L159 511Q162 522 166 540T173 566T179 586T187 603T197 615T211 624T229 626Q247 625 254 615T261 596Q261 589 252 549T232 470L222 433Q222 431 272 431H323Q330 424 330 420Q330 398 317 385H210L174 240Q135 80 135 68Q135 26 162 26Q197 26 230 60T283 144Q285 150 288 151T303 153H307Q322 153 322 145Q322 142 319 133Q314 117 301 95T267 48T216 6T155 -11Q125 -11 98 4T59 56Q57 64 57 83V101L92 241Q127 382 128 383Q128 385 77 385H26Z"/><path id="MJX-1155-TEX-N-2212" d="M84 237T84 250T98 270H679Q694 262 694 250T679 230H98Q84 237 84 250Z"/><path id="MJX-1155-TEX-N-31" d="M213 578L200 573Q186 568 160 563T102 556H83V602H102Q149 604 189 617T245 
641T273 663Q275 666 285 666Q294 666 302 660V361L303 61Q310 54 315 52T339 48T401 46H427V0H416Q395 3 257 3Q121 3 100 0H88V46H114Q136 46 152 46T177 47T193 50T201 52T207 57T213 61V578Z"/><path id="MJX-1155-TEX-N-7C" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1155-TEX-N-2C" d="M78 35T78 60T94 103T137 121Q165 121 187 96T210 8Q210 -27 201 -60T180 -117T154 -158T130 -185T117 -194Q113 -194 104 -185T95 -172Q95 -168 106 -156T131 -126T157 -76T173 -3V9L172 8Q170 7 167 6T161 3T152 1T140 0Q113 0 96 17Z"/><path id="MJX-1155-TEX-I-1D466" d="M21 287Q21 301 36 335T84 406T158 442Q199 442 224 419T250 355Q248 336 247 334Q247 331 231 288T198 191T182 105Q182 62 196 45T238 27Q261 27 281 38T312 61T339 94Q339 95 344 114T358 173T377 247Q415 397 419 404Q432 431 462 431Q475 431 483 424T494 412T496 403Q496 390 447 193T391 -23Q363 -106 294 -155T156 -205Q111 -205 77 -183T43 -117Q43 -95 50 -80T69 -58T89 -48T106 -45Q150 -45 150 -87Q150 -107 138 -122T115 -142T102 -147L99 -148Q101 -153 118 -160T152 -167H160Q177 -167 186 -165Q219 -156 247 -127T290 -65T313 -9T321 21L315 17Q309 13 296 6T270 -6Q250 -11 231 -11Q185 -11 150 11T104 82Q103 89 103 113Q103 170 138 262T173 379Q173 380 173 381Q173 390 173 393T169 400T158 404H154Q131 404 112 385T82 344T65 302T57 280Q55 278 41 278H27Q21 284 21 287Z"/><path id="MJX-1155-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/><path id="MJX-1155-TEX-N-3D" d="M56 347Q56 360 70 367H707Q722 359 722 347Q722 336 708 328L390 327H72Q56 332 56 347ZM56 153Q56 168 72 173H708Q722 163 722 153Q722 140 707 133H70Q56 140 56 153Z"/><path id="MJX-1155-TEX-LO-2211" d="M60 948Q63 950 665 950H1267L1325 815Q1384 677 1388 669H1348L1341 683Q1320 724 1285 761Q1235 809 1174 838T1033 881T882 898T699 902H574H543H251L259 891Q722 258 724 252Q725 250 724 246Q721 243 460 -56L196 -356Q196 -357 407 -357Q459 -357 548 -357T676 -358Q812 -358 896 -353T1063 -332T1204 -283T1307 -196Q1328 -170 1348 -124H1388Q1388 -125 1381 -145T1356 -210T1325 -294L1267 -449L666 -450Q64 -450 61 -448Q55 -446 55 -439Q55 -437 57 -433L590 177Q590 178 557 222T452 366T322 544L56 909L55 924Q55 945 60 948Z"/><path id="MJX-1155-TEX-N-7E" d="M179 251Q164 251 151 245T131 234T111 215L97 227L83 238Q83 239 95 253T121 283T142 304Q165 318 187 318T253 300T320 282Q335 282 348 288T368 299T388 318L402 306L416 295Q375 236 344 222Q330 215 313 215Q292 215 248 233T179 251Z"/><path id="MJX-1155-TEX-N-30" d="M96 585Q152 666 249 666Q297 666 345 640T423 548Q460 465 460 320Q460 165 417 83Q397 41 362 16T301 -15T250 -22Q224 -22 198 -16T137 16T82 83Q39 165 39 320Q39 494 96 585ZM321 597Q291 629 250 629Q208 629 178 597Q153 571 145 525T137 333Q137 175 145 125T181 46Q209 16 250 16Q290 16 318 46Q347 76 354 130T362 333Q362 478 354 524T321 597Z"/><path id="MJX-1155-TEX-I-1D43E" d="M285 628Q285 635 228 637Q205 637 198 638T191 647Q191 649 193 661Q199 681 203 682Q205 683 214 683H219Q260 681 355 681Q389 681 418 681T463 682T483 682Q500 682 500 674Q500 669 497 660Q496 658 496 654T495 648T493 644T490 641T486 639T479 638T470 637T456 637Q416 636 405 634T387 623L306 305Q307 305 490 449T678 597Q692 611 692 620Q692 635 667 637Q651 637 651 648Q651 650 654 662T659 677Q662 682 676 682Q680 682 711 681T791 680Q814 680 839 681T869 682Q889 682 889 672Q889 650 881 642Q878 637 862 637Q787 632 726 586Q710 576 656 534T556 
455L509 418L518 396Q527 374 546 329T581 244Q656 67 661 61Q663 59 666 57Q680 47 717 46H738Q744 38 744 37T741 19Q737 6 731 0H720Q680 3 625 3Q503 3 488 0H478Q472 6 472 9T474 27Q478 40 480 43T491 46H494Q544 46 544 71Q544 75 517 141T485 216L427 354L359 301L291 248L268 155Q245 63 245 58Q245 51 253 49T303 46H334Q340 37 340 35Q340 19 333 5Q328 0 317 0Q314 0 280 1T180 2Q118 2 85 2T49 1Q31 1 31 11Q31 13 34 25Q38 41 42 43T65 46Q92 46 125 49Q139 52 144 61Q147 65 216 339T285 628Z"/><path id="MJX-1155-TEX-I-1D45E" d="M33 157Q33 258 109 349T280 441Q340 441 372 389Q373 390 377 395T388 406T404 418Q438 442 450 442Q454 442 457 439T460 434Q460 425 391 149Q320 -135 320 -139Q320 -147 365 -148H390Q396 -156 396 -157T393 -175Q389 -188 383 -194H370Q339 -192 262 -192Q234 -192 211 -192T174 -192T157 -193Q143 -193 143 -185Q143 -182 145 -170Q149 -154 152 -151T172 -148Q220 -148 230 -141Q238 -136 258 -53T279 32Q279 33 272 29Q224 -10 172 -10Q117 -10 75 30T33 157ZM352 326Q329 405 277 405Q242 405 210 374T160 293Q131 214 119 129Q119 126 119 118T118 106Q118 61 136 44T179 26Q233 26 290 98L298 109L352 326Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="msub"><g data-mml-node="mi"><use data-c="1D45D" xlink:href="#MJX-1155-TEX-I-1D45D"/></g><g data-mml-node="mi" transform="translate(536,-150) scale(0.707)"><use data-c="1D703" xlink:href="#MJX-1155-TEX-I-1D703"/></g></g><g data-mml-node="mo" transform="translate(917.6,0)"><use data-c="28" xlink:href="#MJX-1155-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(1306.6,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1155-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1155-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1155-TEX-N-31"/></g></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(3120.6,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1155-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(3398.6,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mi" transform="translate(605,-150) scale(0.707)"><use data-c="1D461" xlink:href="#MJX-1155-TEX-I-1D461"/></g></g><g data-mml-node="mo" transform="translate(4308.8,0)"><use data-c="2C" xlink:href="#MJX-1155-TEX-N-2C"/></g><g data-mml-node="mi" transform="translate(4753.5,0)"><use data-c="1D466" xlink:href="#MJX-1155-TEX-I-1D466"/></g><g data-mml-node="mo" transform="translate(5243.5,0)"><use data-c="29" xlink:href="#MJX-1155-TEX-N-29"/></g><g data-mml-node="mo" transform="translate(5910.3,0)"><use data-c="3D" xlink:href="#MJX-1155-TEX-N-3D"/></g><g data-mml-node="munderover" transform="translate(6966.1,0)"><g data-mml-node="mo" transform="translate(86.4,0)"><use data-c="2211" xlink:href="#MJX-1155-TEX-LO-2211"/></g><g data-mml-node="TeXAtom" transform="translate(0,-1147.3) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="msub"><g data-mml-node="TeXAtom" data-mjx-texclass="ORD"><g data-mml-node="mover"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mo" transform="translate(313.8,332) translate(-250 0)"><use data-c="7E" 
xlink:href="#MJX-1155-TEX-N-7E"/></g></g></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1155-TEX-N-30"/></g></g><g data-mml-node="mo" transform="translate(1008.6,0)"><use data-c="3D" xlink:href="#MJX-1155-TEX-N-3D"/></g><g data-mml-node="mn" transform="translate(1786.6,0)"><use data-c="31" xlink:href="#MJX-1155-TEX-N-31"/></g></g><g data-mml-node="TeXAtom" transform="translate(494.1,1150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D43E" xlink:href="#MJX-1155-TEX-I-1D43E"/></g></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(8749.6,0)"><g data-mml-node="mfrac"><g data-mml-node="mrow" transform="translate(220,710)"><g data-mml-node="mi"><use data-c="1D45E" xlink:href="#MJX-1155-TEX-I-1D45E"/></g><g data-mml-node="mo" transform="translate(460,0)"><use data-c="28" xlink:href="#MJX-1155-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(849,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mi" transform="translate(605,-150) scale(0.707)"><use data-c="1D461" xlink:href="#MJX-1155-TEX-I-1D461"/></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(1759.3,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1155-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(2037.3,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1155-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1155-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1155-TEX-N-31"/></g></g></g><g data-mml-node="mo" transform="translate(3851.2,0)"><use data-c="29" xlink:href="#MJX-1155-TEX-N-29"/></g><g data-mml-node="mi" transform="translate(4240.2,0)"><use data-c="1D45E" xlink:href="#MJX-1155-TEX-I-1D45E"/></g><g data-mml-node="mo" transform="translate(4700.2,0)"><use data-c="28" xlink:href="#MJX-1155-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(5089.2,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1155-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1155-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1155-TEX-N-31"/></g></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(6903.2,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1155-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(7181.2,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1155-TEX-N-30"/></g></g><g data-mml-node="mo" transform="translate(8189.7,0)"><use data-c="29" xlink:href="#MJX-1155-TEX-N-29"/></g></g><g data-mml-node="mrow" transform="translate(2791.9,-710)"><g data-mml-node="mi"><use data-c="1D45E" xlink:href="#MJX-1155-TEX-I-1D45E"/></g><g data-mml-node="mo" 
transform="translate(460,0)"><use data-c="28" xlink:href="#MJX-1155-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(849,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mi" transform="translate(605,-150) scale(0.707)"><use data-c="1D461" xlink:href="#MJX-1155-TEX-I-1D461"/></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(1759.3,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1155-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(2037.3,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1155-TEX-N-30"/></g></g><g data-mml-node="mo" transform="translate(3045.8,0)"><use data-c="29" xlink:href="#MJX-1155-TEX-N-29"/></g></g><rect width="8778.7" height="60" x="120" y="220"/></g><g data-mml-node="msub" transform="translate(9018.7,0)"><g data-mml-node="mi"><use data-c="1D45D" xlink:href="#MJX-1155-TEX-I-1D45D"/></g><g data-mml-node="mi" transform="translate(536,-150) scale(0.707)"><use data-c="1D703" xlink:href="#MJX-1155-TEX-I-1D703"/></g></g><g data-mml-node="mo" transform="translate(9936.3,0)"><use data-c="28" xlink:href="#MJX-1155-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(10325.3,0)"><g data-mml-node="TeXAtom" data-mjx-texclass="ORD"><g data-mml-node="mover"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mo" transform="translate(313.8,332) translate(-250 0)"><use data-c="7E" xlink:href="#MJX-1155-TEX-N-7E"/></g></g></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1155-TEX-N-30"/></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(11333.9,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1155-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(11611.9,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1155-TEX-I-1D465"/></g><g data-mml-node="mi" transform="translate(605,-150) scale(0.707)"><use data-c="1D461" xlink:href="#MJX-1155-TEX-I-1D461"/></g></g><g data-mml-node="mo" transform="translate(12522.2,0)"><use data-c="2C" xlink:href="#MJX-1155-TEX-N-2C"/></g><g data-mml-node="mi" transform="translate(12966.8,0)"><use data-c="1D466" xlink:href="#MJX-1155-TEX-I-1D466"/></g><g data-mml-node="mo" transform="translate(13456.8,0)"><use data-c="29" xlink:href="#MJX-1155-TEX-N-29"/></g></g></g></g></svg></mjx-container> </span></div></div></div></foreignObject><text x="1486" y="893" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">Scheduler...</text></switch></g><path d="M 1485.75 490 L 1485.75 773.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1485.75 778.88 L 1482.25 771.88 L 1485.75 773.63 L 1489.25 771.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 700px; margin-left: 1547px;"><div 
data-drawio-colors="color: rgb(0, 0, 0); background-color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"><mjx-container class="MathJax" jax="SVG"><svg xmlns="http://www.w3.org/2000/svg" width="12.178ex" height="2.262ex" role="img" focusable="false" viewBox="0 -750 5382.7 1000" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -0.566ex;"><defs><path id="MJX-1150-TEX-I-1D45D" d="M23 287Q24 290 25 295T30 317T40 348T55 381T75 411T101 433T134 442Q209 442 230 378L240 387Q302 442 358 442Q423 442 460 395T497 281Q497 173 421 82T249 -10Q227 -10 210 -4Q199 1 187 11T168 28L161 36Q160 35 139 -51T118 -138Q118 -144 126 -145T163 -148H188Q194 -155 194 -157T191 -175Q188 -187 185 -190T172 -194Q170 -194 161 -194T127 -193T65 -192Q-5 -192 -24 -194H-32Q-39 -187 -39 -183Q-37 -156 -26 -148H-6Q28 -147 33 -136Q36 -130 94 103T155 350Q156 355 156 364Q156 405 131 405Q109 405 94 377T71 316T59 280Q57 278 43 278H29Q23 284 23 287ZM178 102Q200 26 252 26Q282 26 310 49T356 107Q374 141 392 215T411 325V331Q411 405 350 405Q339 405 328 402T306 393T286 380T269 365T254 350T243 336T235 326L232 322Q232 321 229 308T218 264T204 212Q178 106 178 102Z"/><path id="MJX-1150-TEX-I-1D703" d="M35 200Q35 302 74 415T180 610T319 704Q320 704 327 704T339 705Q393 701 423 656Q462 596 462 495Q462 380 417 261T302 66T168 -10H161Q125 -10 99 10T60 63T41 130T35 200ZM383 566Q383 668 330 668Q294 668 260 623T204 521T170 421T157 371Q206 370 254 370L351 371Q352 372 359 404T375 484T383 566ZM113 132Q113 26 166 26Q181 26 198 36T239 74T287 161T335 307L340 324H145Q145 321 136 286T120 208T113 132Z"/><path id="MJX-1150-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1150-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1150-TEX-N-30" d="M96 585Q152 666 249 666Q297 666 345 640T423 548Q460 465 460 320Q460 165 417 83Q397 41 362 16T301 -15T250 -22Q224 -22 198 -16T137 16T82 83Q39 165 39 320Q39 494 96 585ZM321 597Q291 629 250 629Q208 629 178 597Q153 571 145 525T137 333Q137 175 145 125T181 46Q209 16 250 16Q290 16 318 46Q347 76 354 130T362 333Q362 478 354 524T321 597Z"/><path id="MJX-1150-TEX-N-2223" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1150-TEX-I-1D461" d="M26 385Q19 392 19 395Q19 399 22 411T27 425Q29 430 36 430T87 431H140L159 511Q162 522 166 540T173 566T179 586T187 603T197 615T211 624T229 626Q247 625 254 615T261 596Q261 589 252 549T232 470L222 433Q222 431 272 431H323Q330 424 330 420Q330 398 317 
385H210L174 240Q135 80 135 68Q135 26 162 26Q197 26 230 60T283 144Q285 150 288 151T303 153H307Q322 153 322 145Q322 142 319 133Q314 117 301 95T267 48T216 6T155 -11Q125 -11 98 4T59 56Q57 64 57 83V101L92 241Q127 382 128 383Q128 385 77 385H26Z"/><path id="MJX-1150-TEX-N-2C" d="M78 35T78 60T94 103T137 121Q165 121 187 96T210 8Q210 -27 201 -60T180 -117T154 -158T130 -185T117 -194Q113 -194 104 -185T95 -172Q95 -168 106 -156T131 -126T157 -76T173 -3V9L172 8Q170 7 167 6T161 3T152 1T140 0Q113 0 96 17Z"/><path id="MJX-1150-TEX-I-1D466" d="M21 287Q21 301 36 335T84 406T158 442Q199 442 224 419T250 355Q248 336 247 334Q247 331 231 288T198 191T182 105Q182 62 196 45T238 27Q261 27 281 38T312 61T339 94Q339 95 344 114T358 173T377 247Q415 397 419 404Q432 431 462 431Q475 431 483 424T494 412T496 403Q496 390 447 193T391 -23Q363 -106 294 -155T156 -205Q111 -205 77 -183T43 -117Q43 -95 50 -80T69 -58T89 -48T106 -45Q150 -45 150 -87Q150 -107 138 -122T115 -142T102 -147L99 -148Q101 -153 118 -160T152 -167H160Q177 -167 186 -165Q219 -156 247 -127T290 -65T313 -9T321 21L315 17Q309 13 296 6T270 -6Q250 -11 231 -11Q185 -11 150 11T104 82Q103 89 103 113Q103 170 138 262T173 379Q173 380 173 381Q173 390 173 393T169 400T158 404H154Q131 404 112 385T82 344T65 302T57 280Q55 278 41 278H27Q21 284 21 287Z"/><path id="MJX-1150-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="mstyle"><g data-mml-node="msub"><g data-mml-node="mi"><use data-c="1D45D" xlink:href="#MJX-1150-TEX-I-1D45D"/></g><g data-mml-node="mi" transform="translate(536,-150) scale(0.707)"><use data-c="1D703" xlink:href="#MJX-1150-TEX-I-1D703"/></g></g><g data-mml-node="mrow" transform="translate(917.6,0)"><g data-mml-node="mo"><use data-c="28" xlink:href="#MJX-1150-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(389,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1150-TEX-I-1D465"/></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1150-TEX-N-30"/></g></g><g data-mml-node="mrow" transform="translate(1675.3,0)"><g data-mml-node="mo"><use data-c="2223" xlink:href="#MJX-1150-TEX-N-2223"/></g></g><g data-mml-node="msub" transform="translate(2231.1,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1150-TEX-I-1D465"/></g><g data-mml-node="mi" transform="translate(605,-150) scale(0.707)"><use data-c="1D461" xlink:href="#MJX-1150-TEX-I-1D461"/></g></g><g data-mml-node="mo" transform="translate(3141.4,0)"><use data-c="2C" xlink:href="#MJX-1150-TEX-N-2C"/></g><g data-mml-node="mi" transform="translate(3586,0)"><use data-c="1D466" xlink:href="#MJX-1150-TEX-I-1D466"/></g><g data-mml-node="mo" transform="translate(4076,0)"><use data-c="29" xlink:href="#MJX-1150-TEX-N-29"/></g></g></g></g></g></svg></mjx-container> : (batch size, number vector embeddings - 1, number latent pixles)</div></div></div></foreignObject><text x="1547" y="708" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">` p_\theta(x_0 | x_t, y) ` : (batch size, number vector embeddings - 1, number latent pixles)</text></switch></g><path d="M 1047 885.5 L 947 886 L 947 450 L 1127 450 L 1127 196.37" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" 
pointer-events="stroke"/><path d="M 1127 191.12 L 1130.5 198.12 L 1127 196.37 L 1123.5 198.12 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 561px; margin-left: 948px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: #ffffff; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"><mjx-container class="MathJax" jax="SVG"><svg xmlns="http://www.w3.org/2000/svg" width="14ex" height="2.262ex" role="img" focusable="false" viewBox="0 -750 6188.1 1000" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -0.566ex;"><defs><path id="MJX-1151-TEX-I-1D45D" d="M23 287Q24 290 25 295T30 317T40 348T55 381T75 411T101 433T134 442Q209 442 230 378L240 387Q302 442 358 442Q423 442 460 395T497 281Q497 173 421 82T249 -10Q227 -10 210 -4Q199 1 187 11T168 28L161 36Q160 35 139 -51T118 -138Q118 -144 126 -145T163 -148H188Q194 -155 194 -157T191 -175Q188 -187 185 -190T172 -194Q170 -194 161 -194T127 -193T65 -192Q-5 -192 -24 -194H-32Q-39 -187 -39 -183Q-37 -156 -26 -148H-6Q28 -147 33 -136Q36 -130 94 103T155 350Q156 355 156 364Q156 405 131 405Q109 405 94 377T71 316T59 280Q57 278 43 278H29Q23 284 23 287ZM178 102Q200 26 252 26Q282 26 310 49T356 107Q374 141 392 215T411 325V331Q411 405 350 405Q339 405 328 402T306 393T286 380T269 365T254 350T243 336T235 326L232 322Q232 321 229 308T218 264T204 212Q178 106 178 102Z"/><path id="MJX-1151-TEX-I-1D703" d="M35 200Q35 302 74 415T180 610T319 704Q320 704 327 704T339 705Q393 701 423 656Q462 596 462 495Q462 380 417 261T302 66T168 -10H161Q125 -10 99 10T60 63T41 130T35 200ZM383 566Q383 668 330 668Q294 668 260 623T204 521T170 421T157 371Q206 370 254 370L351 371Q352 372 359 404T375 484T383 566ZM113 132Q113 26 166 26Q181 26 198 36T239 74T287 161T335 307L340 324H145Q145 321 136 286T120 208T113 132Z"/><path id="MJX-1151-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1151-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1151-TEX-I-1D461" d="M26 385Q19 392 19 395Q19 399 22 411T27 425Q29 430 36 430T87 431H140L159 511Q162 522 166 540T173 566T179 586T187 603T197 615T211 624T229 626Q247 625 254 615T261 
596Q261 589 252 549T232 470L222 433Q222 431 272 431H323Q330 424 330 420Q330 398 317 385H210L174 240Q135 80 135 68Q135 26 162 26Q197 26 230 60T283 144Q285 150 288 151T303 153H307Q322 153 322 145Q322 142 319 133Q314 117 301 95T267 48T216 6T155 -11Q125 -11 98 4T59 56Q57 64 57 83V101L92 241Q127 382 128 383Q128 385 77 385H26Z"/><path id="MJX-1151-TEX-N-2212" d="M84 237T84 250T98 270H679Q694 262 694 250T679 230H98Q84 237 84 250Z"/><path id="MJX-1151-TEX-N-31" d="M213 578L200 573Q186 568 160 563T102 556H83V602H102Q149 604 189 617T245 641T273 663Q275 666 285 666Q294 666 302 660V361L303 61Q310 54 315 52T339 48T401 46H427V0H416Q395 3 257 3Q121 3 100 0H88V46H114Q136 46 152 46T177 47T193 50T201 52T207 57T213 61V578Z"/><path id="MJX-1151-TEX-N-2223" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1151-TEX-N-2C" d="M78 35T78 60T94 103T137 121Q165 121 187 96T210 8Q210 -27 201 -60T180 -117T154 -158T130 -185T117 -194Q113 -194 104 -185T95 -172Q95 -168 106 -156T131 -126T157 -76T173 -3V9L172 8Q170 7 167 6T161 3T152 1T140 0Q113 0 96 17Z"/><path id="MJX-1151-TEX-I-1D466" d="M21 287Q21 301 36 335T84 406T158 442Q199 442 224 419T250 355Q248 336 247 334Q247 331 231 288T198 191T182 105Q182 62 196 45T238 27Q261 27 281 38T312 61T339 94Q339 95 344 114T358 173T377 247Q415 397 419 404Q432 431 462 431Q475 431 483 424T494 412T496 403Q496 390 447 193T391 -23Q363 -106 294 -155T156 -205Q111 -205 77 -183T43 -117Q43 -95 50 -80T69 -58T89 -48T106 -45Q150 -45 150 -87Q150 -107 138 -122T115 -142T102 -147L99 -148Q101 -153 118 -160T152 -167H160Q177 -167 186 -165Q219 -156 247 -127T290 -65T313 -9T321 21L315 17Q309 13 296 6T270 -6Q250 -11 231 -11Q185 -11 150 11T104 82Q103 89 103 113Q103 170 138 262T173 379Q173 380 173 381Q173 390 173 393T169 400T158 404H154Q131 404 112 385T82 344T65 302T57 280Q55 278 41 278H27Q21 284 21 287Z"/><path id="MJX-1151-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="mstyle"><g data-mml-node="msub"><g data-mml-node="mi"><use data-c="1D45D" xlink:href="#MJX-1151-TEX-I-1D45D"/></g><g data-mml-node="mi" transform="translate(536,-150) scale(0.707)"><use data-c="1D703" xlink:href="#MJX-1151-TEX-I-1D703"/></g></g><g data-mml-node="mrow" transform="translate(917.6,0)"><g data-mml-node="mo"><use data-c="28" xlink:href="#MJX-1151-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(389,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1151-TEX-I-1D465"/></g><g data-mml-node="mrow" transform="translate(605,-150) scale(0.707)"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1151-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1151-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1151-TEX-N-31"/></g></g></g><g data-mml-node="mrow" transform="translate(2480.7,0)"><g data-mml-node="mo"><use data-c="2223" xlink:href="#MJX-1151-TEX-N-2223"/></g></g><g data-mml-node="msub" transform="translate(3036.5,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1151-TEX-I-1D465"/></g><g data-mml-node="mi" transform="translate(605,-150) scale(0.707)"><use data-c="1D461" 
xlink:href="#MJX-1151-TEX-I-1D461"/></g></g><g data-mml-node="mo" transform="translate(3946.8,0)"><use data-c="2C" xlink:href="#MJX-1151-TEX-N-2C"/></g><g data-mml-node="mi" transform="translate(4391.4,0)"><use data-c="1D466" xlink:href="#MJX-1151-TEX-I-1D466"/></g><g data-mml-node="mo" transform="translate(4881.4,0)"><use data-c="29" xlink:href="#MJX-1151-TEX-N-29"/></g></g></g></g></g></svg></mjx-container> : (batch size, number latent pixels)</div></div></div></foreignObject><text x="948" y="569" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">`p_\theta(x_{t-1} | x_t, y)` : (batch size, number latent pixels)</text></switch></g><path d="M 1962 160 L 1962 396.13" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 1962 401.38 L 1958.5 394.38 L 1962 396.13 L 1965.5 394.38 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 131px; margin-left: 1963px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;">Prompt(s)</div></div></div></foreignObject><text x="1963" y="138" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">Prompt(s)</text></switch></g><path d="M 147 460 L 167 390 L 277 390 L 297 460 Z" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 148px; height: 1px; padding-top: 425px; margin-left: 148px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">VQ-VAE decoder</div></div></div></foreignObject><text x="222" y="433" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">VQ-VAE decod...</text></switch></g><path d="M 222 460 L 222 603.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 222 608.88 L 218.5 601.88 L 222 603.63 L 225.5 601.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 
1px; height: 1px; padding-top: 631px; margin-left: 218px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;">sample: (batch size, number rgb pixels)</div></div></div></foreignObject><text x="218" y="639" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">sample: (batch size, number rgb pixels)</text></switch></g><path d="M 1098.72 178.28 Q 947 240 782.89 172.42" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 778.03 170.43 L 785.84 169.85 L 782.89 172.42 L 783.17 176.33 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 251px; margin-left: 947px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"><mjx-container class="MathJax" jax="SVG" display="true"><svg xmlns="http://www.w3.org/2000/svg" width="12.673ex" height="2.262ex" role="img" focusable="false" viewBox="0 -750 5601.5 1000" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -0.566ex;"><defs><path id="MJX-1156-TEX-I-1D45D" d="M23 287Q24 290 25 295T30 317T40 348T55 381T75 411T101 433T134 442Q209 442 230 378L240 387Q302 442 358 442Q423 442 460 395T497 281Q497 173 421 82T249 -10Q227 -10 210 -4Q199 1 187 11T168 28L161 36Q160 35 139 -51T118 -138Q118 -144 126 -145T163 -148H188Q194 -155 194 -157T191 -175Q188 -187 185 -190T172 -194Q170 -194 161 -194T127 -193T65 -192Q-5 -192 -24 -194H-32Q-39 -187 -39 -183Q-37 -156 -26 -148H-6Q28 -147 33 -136Q36 -130 94 103T155 350Q156 355 156 364Q156 405 131 405Q109 405 94 377T71 316T59 280Q57 278 43 278H29Q23 284 23 287ZM178 102Q200 26 252 26Q282 26 310 49T356 107Q374 141 392 215T411 325V331Q411 405 350 405Q339 405 328 402T306 393T286 380T269 365T254 350T243 336T235 326L232 322Q232 321 229 308T218 264T204 212Q178 106 178 102Z"/><path id="MJX-1156-TEX-I-1D703" d="M35 200Q35 302 74 415T180 610T319 704Q320 704 327 704T339 705Q393 701 423 656Q462 596 462 495Q462 380 417 261T302 66T168 -10H161Q125 -10 99 10T60 63T41 130T35 200ZM383 566Q383 668 330 668Q294 668 260 623T204 521T170 421T157 371Q206 370 254 370L351 371Q352 372 359 404T375 484T383 566ZM113 132Q113 26 166 26Q181 26 198 36T239 74T287 161T335 307L340 324H145Q145 321 136 286T120 208T113 132Z"/><path id="MJX-1156-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1156-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 
430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1156-TEX-I-1D461" d="M26 385Q19 392 19 395Q19 399 22 411T27 425Q29 430 36 430T87 431H140L159 511Q162 522 166 540T173 566T179 586T187 603T197 615T211 624T229 626Q247 625 254 615T261 596Q261 589 252 549T232 470L222 433Q222 431 272 431H323Q330 424 330 420Q330 398 317 385H210L174 240Q135 80 135 68Q135 26 162 26Q197 26 230 60T283 144Q285 150 288 151T303 153H307Q322 153 322 145Q322 142 319 133Q314 117 301 95T267 48T216 6T155 -11Q125 -11 98 4T59 56Q57 64 57 83V101L92 241Q127 382 128 383Q128 385 77 385H26Z"/><path id="MJX-1156-TEX-N-2212" d="M84 237T84 250T98 270H679Q694 262 694 250T679 230H98Q84 237 84 250Z"/><path id="MJX-1156-TEX-N-32" d="M109 429Q82 429 66 447T50 491Q50 562 103 614T235 666Q326 666 387 610T449 465Q449 422 429 383T381 315T301 241Q265 210 201 149L142 93L218 92Q375 92 385 97Q392 99 409 186V189H449V186Q448 183 436 95T421 3V0H50V19V31Q50 38 56 46T86 81Q115 113 136 137Q145 147 170 174T204 211T233 244T261 278T284 308T305 340T320 369T333 401T340 431T343 464Q343 527 309 573T212 619Q179 619 154 602T119 569T109 550Q109 549 114 549Q132 549 151 535T170 489Q170 464 154 447T109 429Z"/><path id="MJX-1156-TEX-N-7C" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1156-TEX-N-31" d="M213 578L200 573Q186 568 160 563T102 556H83V602H102Q149 604 189 617T245 641T273 663Q275 666 285 666Q294 666 302 660V361L303 61Q310 54 315 52T339 48T401 46H427V0H416Q395 3 257 3Q121 3 100 0H88V46H114Q136 46 152 46T177 47T193 50T201 52T207 57T213 61V578Z"/><path id="MJX-1156-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="msub"><g data-mml-node="mi"><use data-c="1D45D" xlink:href="#MJX-1156-TEX-I-1D45D"/></g><g data-mml-node="mi" transform="translate(536,-150) scale(0.707)"><use data-c="1D703" xlink:href="#MJX-1156-TEX-I-1D703"/></g></g><g data-mml-node="mo" transform="translate(917.6,0)"><use data-c="28" xlink:href="#MJX-1156-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(1306.6,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1156-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1156-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1156-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="32" xlink:href="#MJX-1156-TEX-N-32"/></g></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(3120.6,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use 
data-c="7C" xlink:href="#MJX-1156-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(3398.6,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1156-TEX-I-1D465"/></g><g data-mml-node="TeXAtom" transform="translate(605,-150) scale(0.707)" data-mjx-texclass="ORD"><g data-mml-node="mi"><use data-c="1D461" xlink:href="#MJX-1156-TEX-I-1D461"/></g><g data-mml-node="mo" transform="translate(361,0)"><use data-c="2212" xlink:href="#MJX-1156-TEX-N-2212"/></g><g data-mml-node="mn" transform="translate(1139,0)"><use data-c="31" xlink:href="#MJX-1156-TEX-N-31"/></g></g></g><g data-mml-node="mo" transform="translate(5212.5,0)"><use data-c="29" xlink:href="#MJX-1156-TEX-N-29"/></g></g></g></svg></mjx-container></div></div></div></foreignObject><text x="947" y="258" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">$$ p_\theta(x_{t-2} | x_{t-1}) $$</text></switch></g><path d="M 777 170 Q 587 240 431.23 180.55" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 426.33 178.68 L 434.12 177.91 L 431.23 180.55 L 431.62 184.45 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 250px; margin-left: 597px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;"><mjx-container class="MathJax" jax="SVG" display="true"><svg xmlns="http://www.w3.org/2000/svg" width="9.029ex" height="2.262ex" role="img" focusable="false" viewBox="0 -750 3990.7 1000" xmlns:xlink="http://www.w3.org/1999/xlink" style="vertical-align: -0.566ex;"><defs><path id="MJX-1157-TEX-I-1D45D" d="M23 287Q24 290 25 295T30 317T40 348T55 381T75 411T101 433T134 442Q209 442 230 378L240 387Q302 442 358 442Q423 442 460 395T497 281Q497 173 421 82T249 -10Q227 -10 210 -4Q199 1 187 11T168 28L161 36Q160 35 139 -51T118 -138Q118 -144 126 -145T163 -148H188Q194 -155 194 -157T191 -175Q188 -187 185 -190T172 -194Q170 -194 161 -194T127 -193T65 -192Q-5 -192 -24 -194H-32Q-39 -187 -39 -183Q-37 -156 -26 -148H-6Q28 -147 33 -136Q36 -130 94 103T155 350Q156 355 156 364Q156 405 131 405Q109 405 94 377T71 316T59 280Q57 278 43 278H29Q23 284 23 287ZM178 102Q200 26 252 26Q282 26 310 49T356 107Q374 141 392 215T411 325V331Q411 405 350 405Q339 405 328 402T306 393T286 380T269 365T254 350T243 336T235 326L232 322Q232 321 229 308T218 264T204 212Q178 106 178 102Z"/><path id="MJX-1157-TEX-I-1D703" d="M35 200Q35 302 74 415T180 610T319 704Q320 704 327 704T339 705Q393 701 423 656Q462 596 462 495Q462 380 417 261T302 66T168 -10H161Q125 -10 99 10T60 63T41 130T35 200ZM383 566Q383 668 330 668Q294 668 260 623T204 521T170 421T157 371Q206 370 254 370L351 371Q352 372 359 404T375 484T383 566ZM113 132Q113 26 166 26Q181 26 198 36T239 74T287 161T335 307L340 324H145Q145 321 136 286T120 208T113 132Z"/><path id="MJX-1157-TEX-N-28" d="M94 250Q94 319 104 381T127 488T164 576T202 643T244 695T277 729T302 
750H315H319Q333 750 333 741Q333 738 316 720T275 667T226 581T184 443T167 250T184 58T225 -81T274 -167T316 -220T333 -241Q333 -250 318 -250H315H302L274 -226Q180 -141 137 -14T94 250Z"/><path id="MJX-1157-TEX-I-1D465" d="M52 289Q59 331 106 386T222 442Q257 442 286 424T329 379Q371 442 430 442Q467 442 494 420T522 361Q522 332 508 314T481 292T458 288Q439 288 427 299T415 328Q415 374 465 391Q454 404 425 404Q412 404 406 402Q368 386 350 336Q290 115 290 78Q290 50 306 38T341 26Q378 26 414 59T463 140Q466 150 469 151T485 153H489Q504 153 504 145Q504 144 502 134Q486 77 440 33T333 -11Q263 -11 227 52Q186 -10 133 -10H127Q78 -10 57 16T35 71Q35 103 54 123T99 143Q142 143 142 101Q142 81 130 66T107 46T94 41L91 40Q91 39 97 36T113 29T132 26Q168 26 194 71Q203 87 217 139T245 247T261 313Q266 340 266 352Q266 380 251 392T217 404Q177 404 142 372T93 290Q91 281 88 280T72 278H58Q52 284 52 289Z"/><path id="MJX-1157-TEX-N-30" d="M96 585Q152 666 249 666Q297 666 345 640T423 548Q460 465 460 320Q460 165 417 83Q397 41 362 16T301 -15T250 -22Q224 -22 198 -16T137 16T82 83Q39 165 39 320Q39 494 96 585ZM321 597Q291 629 250 629Q208 629 178 597Q153 571 145 525T137 333Q137 175 145 125T181 46Q209 16 250 16Q290 16 318 46Q347 76 354 130T362 333Q362 478 354 524T321 597Z"/><path id="MJX-1157-TEX-N-7C" d="M139 -249H137Q125 -249 119 -235V251L120 737Q130 750 139 750Q152 750 159 735V-235Q151 -249 141 -249H139Z"/><path id="MJX-1157-TEX-N-31" d="M213 578L200 573Q186 568 160 563T102 556H83V602H102Q149 604 189 617T245 641T273 663Q275 666 285 666Q294 666 302 660V361L303 61Q310 54 315 52T339 48T401 46H427V0H416Q395 3 257 3Q121 3 100 0H88V46H114Q136 46 152 46T177 47T193 50T201 52T207 57T213 61V578Z"/><path id="MJX-1157-TEX-N-29" d="M60 749L64 750Q69 750 74 750H86L114 726Q208 641 251 514T294 250Q294 182 284 119T261 12T224 -76T186 -143T145 -194T113 -227T90 -246Q87 -249 86 -250H74Q66 -250 63 -250T58 -247T55 -238Q56 -237 66 -225Q221 -64 221 250T66 725Q56 737 55 738Q55 746 60 749Z"/></defs><g stroke="currentColor" fill="currentColor" stroke-width="0" transform="scale(1,-1)"><g data-mml-node="math"><g data-mml-node="msub"><g data-mml-node="mi"><use data-c="1D45D" xlink:href="#MJX-1157-TEX-I-1D45D"/></g><g data-mml-node="mi" transform="translate(536,-150) scale(0.707)"><use data-c="1D703" xlink:href="#MJX-1157-TEX-I-1D703"/></g></g><g data-mml-node="mo" transform="translate(917.6,0)"><use data-c="28" xlink:href="#MJX-1157-TEX-N-28"/></g><g data-mml-node="msub" transform="translate(1306.6,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1157-TEX-I-1D465"/></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="30" xlink:href="#MJX-1157-TEX-N-30"/></g></g><g data-mml-node="TeXAtom" data-mjx-texclass="ORD" transform="translate(2315.2,0)"><g data-mml-node="mo" transform="translate(0 -0.5)"><use data-c="7C" xlink:href="#MJX-1157-TEX-N-7C"/></g></g><g data-mml-node="msub" transform="translate(2593.2,0)"><g data-mml-node="mi"><use data-c="1D465" xlink:href="#MJX-1157-TEX-I-1D465"/></g><g data-mml-node="mn" transform="translate(605,-150) scale(0.707)"><use data-c="31" xlink:href="#MJX-1157-TEX-N-31"/></g></g><g data-mml-node="mo" transform="translate(3601.7,0)"><use data-c="29" xlink:href="#MJX-1157-TEX-N-29"/></g></g></g></svg></mjx-container></div></div></div></foreignObject><text x="597" y="258" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">$$ p_\theta(x_0 | x_1) $$</text></switch></g><path d="M 357 150 L 222 150 L 222 383.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" 
pointer-events="stroke"/><path d="M 222 388.88 L 218.5 381.88 L 222 383.63 L 225.5 381.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 331px; margin-left: 208px;"><div data-drawio-colors="color: rgb(0, 0, 0); background-color: rgb(255, 255, 255); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 25px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; background-color: rgb(255, 255, 255); white-space: nowrap;">x<sub style="font-size: 25px;">0</sub>: (batch size, number latent pixels)</div></div></div></foreignObject><text x="208" y="338" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="25px" text-anchor="middle">x0: (batch size, number latent pixels)</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg>
8
0
hf_public_repos/blog/assets
hf_public_repos/blog/assets/93_deep_rl_ppo/test
9
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/vgg/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{DType, IndexOp, D}; use candle_nn::{ModuleT, VarBuilder}; use candle_transformers::models::vgg::{Models, Vgg}; use clap::{Parser, ValueEnum}; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { Vgg13, Vgg16, Vgg19, } #[derive(Parser)] struct Args { #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Variant of the model to use. #[arg(value_enum, long, default_value_t = Which::Vgg13)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let api = hf_hub::api::sync::Api::new()?; let repo = match args.which { Which::Vgg13 => "timm/vgg13.tv_in1k", Which::Vgg16 => "timm/vgg16.tv_in1k", Which::Vgg19 => "timm/vgg19.tv_in1k", }; let api = api.model(repo.into()); let filename = "model.safetensors"; let model_file = api.get(filename)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = match args.which { Which::Vgg13 => Vgg::new(vb, Models::Vgg13)?, Which::Vgg16 => Vgg::new(vb, Models::Vgg16)?, Which::Vgg19 => Vgg::new(vb, Models::Vgg19)?, }; let logits = model.forward_t(&image, /*train=*/ false)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; // Sort the predictions and take the top 5 let mut top: Vec<_> = prs.iter().enumerate().collect(); top.sort_by(|a, b| b.1.partial_cmp(a.1).unwrap()); let top = top.into_iter().take(5).collect::<Vec<_>>(); // Print the top predictions for &(i, p) in &top { println!( "{:50}: {:.2}%", candle_examples::imagenet::CLASSES[i], p * 100.0 ); } Ok(()) }
0
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/vgg/README.md
## VGG Model Implementation

This example demonstrates the implementation of VGG models (VGG13, VGG16, VGG19) using the Candle library. The VGG models are defined in `candle-transformers/src/models/vgg.rs`. The main function in `candle-examples/examples/vgg/main.rs` loads an image, selects the VGG model based on the provided argument, and applies the model to the loaded image.

You can run the example with the following command:

```bash
cargo run --example vgg --release -- --image ../yolo-v8/assets/bike.jpg --which vgg13
```

In the command above, `--image` specifies the path to the image file and `--which` specifies the VGG model to use (vgg13, vgg16, or vgg19).
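The flow described above can be condensed into a short sketch (illustrative only, not part of the example): it strips the CLI handling from `main.rs`, hard-codes the CPU device and the VGG16 variant, and returns the softmax probabilities over the ImageNet classes.

```rust
use candle::{DType, IndexOp, D};
use candle_nn::{ModuleT, VarBuilder};
use candle_transformers::models::vgg::{Models, Vgg};

// Condensed sketch of the example's flow: load + preprocess an image, fetch
// the VGG16 weights from the Hub, run the forward pass, and return the
// per-class probabilities. Function name and hard-coded choices are ours.
fn classify_with_vgg16(image_path: String) -> anyhow::Result<Vec<f32>> {
    let device = candle_examples::device(/*cpu=*/ true)?;
    let image = candle_examples::imagenet::load_image224(image_path)?.to_device(&device)?;
    let api = hf_hub::api::sync::Api::new()?.model("timm/vgg16.tv_in1k".into());
    let model_file = api.get("model.safetensors")?;
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
    let model = Vgg::new(vb, Models::Vgg16)?;
    let logits = model.forward_t(&image, /*train=*/ false)?;
    Ok(candle_nn::ops::softmax(&logits, D::Minus1)?.i(0)?.to_vec1::<f32>()?)
}
```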
1
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/bert/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle_transformers::models::bert::{BertModel, Config, HiddenAct, DTYPE}; use anyhow::{Error as E, Result}; use candle::Tensor; use candle_nn::VarBuilder; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::{PaddingParams, Tokenizer}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model to use, check out available models: https://huggingface.co/models?library=sentence-transformers&sort=trending #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, /// When set, compute embeddings for this prompt. #[arg(long)] prompt: Option<String>, /// Use the pytorch weights rather than the safetensors ones #[arg(long)] use_pth: bool, /// The number of times to run the prompt. #[arg(long, default_value = "1")] n: usize, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, /// Use tanh based approximation for Gelu instead of erf implementation. #[arg(long, default_value = "false")] approximate_gelu: bool, } impl Args { fn build_model_and_tokenizer(&self) -> Result<(BertModel, Tokenizer)> { let device = candle_examples::device(self.cpu)?; let default_model = "sentence-transformers/all-MiniLM-L6-v2".to_string(); let default_revision = "refs/pr/21".to_string(); let (model_id, revision) = match (self.model_id.to_owned(), self.revision.to_owned()) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let repo = Repo::with_revision(model_id, RepoType::Model, revision); let (config_filename, tokenizer_filename, weights_filename) = { let api = Api::new()?; let api = api.repo(repo); let config = api.get("config.json")?; let tokenizer = api.get("tokenizer.json")?; let weights = if self.use_pth { api.get("pytorch_model.bin")? } else { api.get("model.safetensors")? }; (config, tokenizer, weights) }; let config = std::fs::read_to_string(config_filename)?; let mut config: Config = serde_json::from_str(&config)?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let vb = if self.use_pth { VarBuilder::from_pth(&weights_filename, DTYPE, &device)? } else { unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], DTYPE, &device)? } }; if self.approximate_gelu { config.hidden_act = HiddenAct::GeluApproximate; } let model = BertModel::load(vb, &config)?; Ok((model, tokenizer)) } } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { println!("tracing..."); let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let start = std::time::Instant::now(); let (model, mut tokenizer) = args.build_model_and_tokenizer()?; let device = &model.device; if let Some(prompt) = args.prompt { let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? 
.get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let token_type_ids = token_ids.zeros_like()?; println!("Loaded and encoded {:?}", start.elapsed()); for idx in 0..args.n { let start = std::time::Instant::now(); let ys = model.forward(&token_ids, &token_type_ids, None)?; if idx == 0 { println!("{ys}"); } println!("Took {:?}", start.elapsed()); } } else { let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); if let Some(pp) = tokenizer.get_padding_mut() { pp.strategy = tokenizers::PaddingStrategy::BatchLongest } else { let pp = PaddingParams { strategy: tokenizers::PaddingStrategy::BatchLongest, ..Default::default() }; tokenizer.with_padding(Some(pp)); } let tokens = tokenizer .encode_batch(sentences.to_vec(), true) .map_err(E::msg)?; let token_ids = tokens .iter() .map(|tokens| { let tokens = tokens.get_ids().to_vec(); Ok(Tensor::new(tokens.as_slice(), device)?) }) .collect::<Result<Vec<_>>>()?; let attention_mask = tokens .iter() .map(|tokens| { let tokens = tokens.get_attention_mask().to_vec(); Ok(Tensor::new(tokens.as_slice(), device)?) }) .collect::<Result<Vec<_>>>()?; let token_ids = Tensor::stack(&token_ids, 0)?; let attention_mask = Tensor::stack(&attention_mask, 0)?; let token_type_ids = token_ids.zeros_like()?; println!("running inference on batch {:?}", token_ids.shape()); let embeddings = model.forward(&token_ids, &token_type_ids, Some(&attention_mask))?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? } else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); let mut similarities = vec![]; for i in 0..n_sentences { let e_i = embeddings.get(i)?; for j in (i + 1)..n_sentences { let e_j = embeddings.get(j)?; let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> Result<Tensor> { Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?) }
2
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/bert/README.md
# candle-bert

Bert is a general large language model. In this example it can be used for two different tasks:

- Compute sentence embeddings for a prompt.
- Compute similarities between a set of sentences.

## Sentence embeddings

Bert is used to compute the sentence embeddings for a prompt. The model weights are downloaded from the hub on the first run.

```bash
cargo run --example bert --release -- --prompt "Here is a test sentence"

> [[[ 0.0798, -0.0665, -0.0247, ..., -0.1082, -0.1000, -0.2751],
> [ 0.4218, 0.2690, 0.2740, ..., 0.3889, 1.3503, 0.9908],
> [ 0.0466, 0.3041, -0.1143, ..., 0.4427, 0.6926, -0.1515],
> ...
> [ 0.3396, 0.4320, -0.4408, ..., 0.9212, 0.2331, -0.6777],
> [ 0.2789, 0.7539, 0.4306, ..., -0.0095, 0.3375, -1.7529],
> [ 0.6737, 0.7882, 0.0548, ..., 0.1836, 0.7299, -0.6617]]]
> Tensor[[1, 7, 384], f32]
```

### Custom models

You can specify different models, such as BGE, with the `--model-id` flag:

```bash
cargo run --example bert --release -- \
    --model-id BAAI/bge-large-zh-v1.5 \
    --prompt "Here is a test sentence"

Loaded and encoded 435.70775ms
[[[ 3.0944e-1, -7.8455e-5, -1.2768e0, ..., 1.3755e-2, -3.2371e-1, 2.3819e-1],
[-2.8506e-1, 1.9953e-1, -1.3076e0, ..., 6.9819e-2, 1.0833e-2, -1.1512e0],
[ 3.9892e-1, 2.0000e-1, -9.3178e-1, ..., -4.1393e-1, -4.9644e-2, -3.3786e-1],
...
[ 6.0345e-1, 3.5744e-1, -1.2672e0, ..., -6.9165e-1, -3.4973e-3, -8.4214e-1],
[ 3.9218e-1, -3.2735e-1, -1.3123e0, ..., -4.9318e-1, -5.1334e-1, -3.6391e-1],
[ 3.0978e-1, 2.5662e-4, -1.2773e0, ..., 1.3357e-2, -3.2390e-1, 2.3858e-1]]]
Tensor[[1, 9, 1024], f32]
Took 176.744667ms
```

### Gelu approximation

You can get a speedup by using an approximation of the gelu activation, with a small loss of precision, by passing the `--approximate-gelu` flag:

```bash
$ cargo run --example bert --release -- \
    --model-id BAAI/bge-large-zh-v1.5 \
    --prompt "Here is a test sentence" \
    --approximate-gelu

Loaded and encoded 244.388042ms
[[[ 3.1048e-1, -6.0339e-4, -1.2758e0, ..., 1.3718e-2, -3.2362e-1, 2.3775e-1],
[-2.8354e-1, 1.9984e-1, -1.3077e0, ..., 6.9390e-2, 9.9681e-3, -1.1531e0],
[ 3.9947e-1, 1.9917e-1, -9.3178e-1, ..., -4.1301e-1, -5.0719e-2, -3.3955e-1],
...
[ 6.0499e-1, 3.5664e-1, -1.2642e0, ..., -6.9134e-1, -3.4581e-3, -8.4471e-1],
[ 3.9311e-1, -3.2812e-1, -1.3105e0, ..., -4.9291e-1, -5.1270e-1, -3.6543e-1],
[ 3.1082e-1, -2.6737e-4, -1.2762e0, ..., 1.3319e-2, -3.2381e-1, 2.3815e-1]]]
Tensor[[1, 9, 1024], f32]
Took 116.840791ms
```

## Similarities

In this example, Bert is used to compute the sentence embeddings for a set of sentences (hardcoded in the examples). Then cosine similarities are computed for each sentence pair and they are reported by decreasing values, hence the first reported pair contains the two sentences that have the highest similarity score. The sentence embeddings are computed using average pooling through all the sentence tokens, including some potential padding.

```bash
cargo run --example bert --release

> score: 0.85 'The new movie is awesome' 'The new movie is so great'
> score: 0.61 'The cat sits outside' 'The cat plays in the garden'
> score: 0.52 'I love pasta' 'Do you like pizza?'
> score: 0.23 'The new movie is awesome' 'Do you like pizza?'
> score: 0.22 'I love pasta' 'The new movie is awesome'
```
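To make the pooling-and-scoring description above concrete, here is a minimal sketch (not part of the example; the function name and signature are ours) that reuses the same candle tensor operations as `main.rs`: mean pooling over tokens, L2 normalization, and a cosine score for one pair of sentences.

```rust
use candle::{Result, Tensor};

/// Illustrative helper: score one sentence pair given the raw BERT outputs
/// with shape (n_sentences, n_tokens, hidden_size).
fn cosine_of_pair(embeddings: &Tensor, i: usize, j: usize) -> Result<f32> {
    let (_n_sentences, n_tokens, _hidden_size) = embeddings.dims3()?;
    // Average pooling over all tokens, padding included, as in the example.
    let pooled = (embeddings.sum(1)? / (n_tokens as f64))?;
    // L2 normalization, mirroring normalize_l2 in main.rs.
    let pooled = pooled.broadcast_div(&pooled.sqr()?.sum_keepdim(1)?.sqrt()?)?;
    let (e_i, e_j) = (pooled.get(i)?, pooled.get(j)?);
    let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?;
    let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?;
    let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?;
    Ok(sum_ij / (sum_i2 * sum_j2).sqrt())
}
```

Running this over every pair and sorting the scores in decreasing order reproduces the ranking shown in the sample output above.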
3
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/depth_anything_v2/color_map.rs
use enterpolation::linear::ConstEquidistantLinear;
use enterpolation::Generator;
use palette::LinSrgb;

use candle::Tensor;

pub struct SpectralRColormap {
    gradient: ConstEquidistantLinear<f32, LinSrgb, 9>,
}

impl SpectralRColormap {
    pub(crate) fn new() -> Self {
        // Define a colormap similar to 'Spectral_r' by specifying key colors.
        // got the colors from ChatGPT-4o
        let gradient = ConstEquidistantLinear::<f32, _, 9>::equidistant_unchecked([
            LinSrgb::new(0.3686, 0.3098, 0.6353), // Dark blue
            LinSrgb::new(0.1961, 0.5333, 0.7412), // Blue
            LinSrgb::new(0.4000, 0.7608, 0.6471), // Cyan
            LinSrgb::new(0.6706, 0.8667, 0.6431), // Green
            LinSrgb::new(0.9020, 0.9608, 0.5961), // Yellow
            LinSrgb::new(0.9961, 0.8784, 0.5451), // Orange
            LinSrgb::new(0.9922, 0.6824, 0.3804), // Red
            LinSrgb::new(0.9569, 0.4275, 0.2627), // Dark red
            LinSrgb::new(0.8353, 0.2431, 0.3098), // Dark purple
        ]);
        Self { gradient }
    }

    fn get_color(&self, value: f32) -> LinSrgb {
        self.gradient.gen(value)
    }

    pub fn gray2color(&self, gray: &Tensor) -> candle::Result<Tensor> {
        println!("Gray: {:?}", gray.dims());
        let gray_values: Vec<f32> = gray.flatten_all()?.to_vec1()?;
        let rgb_values: Vec<f32> = gray_values
            .iter()
            .map(|g| self.get_color(*g))
            .flat_map(|rgb| [rgb.red, rgb.green, rgb.blue])
            .collect();
        let [.., height, width] = gray.dims() else {
            candle::bail!("Not enough dims!")
        };
        let color = Tensor::from_vec(rgb_values, (*height, *width, 3), gray.device())?;
        color.permute((2, 0, 1))
    }
}
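A hypothetical usage sketch (this helper is not part of the example): it builds a tiny depth map already normalized to [0, 1], wraps it in a candle tensor, and runs it through `gray2color`, which returns a (3, height, width) RGB tensor as implemented above. Since `SpectralRColormap::new` is `pub(crate)`, such a helper would have to live inside the same example crate.

```rust
use candle::{Device, Result, Tensor};

use crate::color_map::SpectralRColormap;

// Illustrative only: colorize a small, already-normalized depth map on the CPU.
fn demo_colorize() -> Result<Tensor> {
    let (height, width) = (2usize, 3usize);
    let depth: Vec<f32> = vec![0.0, 0.2, 0.4, 0.6, 0.8, 1.0];
    let gray = Tensor::from_vec(depth, (height, width), &Device::Cpu)?;
    // gray2color expects values in [0, 1] and yields a (3, height, width) tensor.
    SpectralRColormap::new().gray2color(&gray)
}
```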
4
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/depth_anything_v2/main.rs
//! Depth Anything V2 //! https://huggingface.co/spaces/depth-anything/Depth-Anything-V2 #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use std::ffi::OsString; use std::path::PathBuf; use clap::Parser; use candle::DType::{F32, U8}; use candle::{DType, Device, Module, Result, Tensor}; use candle_examples::{load_image, load_image_and_resize, save_image}; use candle_nn::VarBuilder; use candle_transformers::models::depth_anything_v2::{DepthAnythingV2, DepthAnythingV2Config}; use candle_transformers::models::dinov2; use crate::color_map::SpectralRColormap; mod color_map; // taken these from: https://huggingface.co/spaces/depth-anything/Depth-Anything-V2/blob/main/depth_anything_v2/dpt.py#L207 const MAGIC_MEAN: [f32; 3] = [0.485, 0.456, 0.406]; const MAGIC_STD: [f32; 3] = [0.229, 0.224, 0.225]; const DINO_IMG_SIZE: usize = 518; #[derive(Parser)] struct Args { #[arg(long)] dinov2_model: Option<PathBuf>, #[arg(long)] depth_anything_v2_model: Option<PathBuf>, #[arg(long)] image: PathBuf, #[arg(long)] output_dir: Option<PathBuf>, #[arg(long)] cpu: bool, #[arg(long)] color_map: bool, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let dinov2_model_file = match args.dinov2_model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-dino-v2".into()); api.get("dinov2_vits14.safetensors")? } Some(dinov2_model) => dinov2_model, }; println!("Using file {:?}", dinov2_model_file); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[dinov2_model_file], F32, &device)? }; let dinov2 = dinov2::vit_small(vb)?; println!("DinoV2 model built"); let depth_anything_model_file = match args.depth_anything_v2_model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("jeroenvlek/depth-anything-v2-safetensors".into()); api.get("depth_anything_v2_vits.safetensors")? } Some(depth_anything_model) => depth_anything_model, }; println!("Using file {:?}", depth_anything_model_file); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[depth_anything_model_file], DType::F32, &device)? }; let config = DepthAnythingV2Config::vit_small(); let depth_anything = DepthAnythingV2::new(&dinov2, &config, vb)?; let (original_height, original_width, image) = load_and_prep_image(&args.image, &device)?; println!("Loaded image {image:?}"); let depth = depth_anything.forward(&image)?; println!("Got predictions {:?}", depth.shape()); let output_image = post_process_image(&depth, original_height, original_width, args.color_map)?; let output_path = full_output_path(&args.image, &args.output_dir); println!("Saving image to {}", output_path.to_string_lossy()); save_image(&output_image, output_path)?; Ok(()) } fn full_output_path(image_path: &PathBuf, output_dir: &Option<PathBuf>) -> PathBuf { let input_file_name = image_path.file_name().unwrap(); let mut output_file_name = OsString::from("depth_"); output_file_name.push(input_file_name); let mut output_path = match output_dir { None => image_path.parent().unwrap().to_path_buf(), Some(output_path) => output_path.clone(), }; output_path.push(output_file_name); output_path } fn load_and_prep_image( image_path: &PathBuf, device: &Device, ) -> anyhow::Result<(usize, usize, Tensor)> { let (_original_image, original_height, original_width) = load_image(&image_path, None)?; let image = load_image_and_resize(&image_path, DINO_IMG_SIZE, DINO_IMG_SIZE)? .unsqueeze(0)? .to_dtype(F32)? 
.to_device(&device)?; let max_pixel_val = Tensor::try_from(255.0f32)? .to_device(&device)? .broadcast_as(image.shape())?; let image = (image / max_pixel_val)?; let image = normalize_image(&image, &MAGIC_MEAN, &MAGIC_STD)?; Ok((original_height, original_width, image)) } fn normalize_image(image: &Tensor, mean: &[f32; 3], std: &[f32; 3]) -> Result<Tensor> { let mean_tensor = Tensor::from_vec(mean.to_vec(), (3, 1, 1), &image.device())?.broadcast_as(image.shape())?; let std_tensor = Tensor::from_vec(std.to_vec(), (3, 1, 1), &image.device())?.broadcast_as(image.shape())?; image.sub(&mean_tensor)?.div(&std_tensor) } fn post_process_image( image: &Tensor, original_height: usize, original_width: usize, color_map: bool, ) -> Result<Tensor> { let out = image.interpolate2d(original_height, original_width)?; let out = scale_image(&out)?; let out = if color_map { let spectral_r = SpectralRColormap::new(); spectral_r.gray2color(&out)? } else { let rgb_slice = [&out, &out, &out]; Tensor::cat(&rgb_slice, 0)?.squeeze(1)? }; let max_pixel_val = Tensor::try_from(255.0f32)? .to_device(out.device())? .broadcast_as(out.shape())?; let out = (out * max_pixel_val)?; out.to_dtype(U8) } fn scale_image(depth: &Tensor) -> Result<Tensor> { let flat_values: Vec<f32> = depth.flatten_all()?.to_vec1()?; let min_val = flat_values.iter().min_by(|a, b| a.total_cmp(b)).unwrap(); let max_val = flat_values.iter().max_by(|a, b| a.total_cmp(b)).unwrap(); let min_val_tensor = Tensor::try_from(*min_val)? .to_device(depth.device())? .broadcast_as(depth.shape())?; let depth = (depth - min_val_tensor)?; let range = max_val - min_val; let range_tensor = Tensor::try_from(range)? .to_device(depth.device())? .broadcast_as(depth.shape())?; depth / range_tensor }
5
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/depth_anything_v2/README.md
# candle-depth-anything-v2

[Depth Anything V2] is a model for Monocular Depth Estimation (MDE, i.e. using just a single image) which builds on the [DINOv2](https://github.com/facebookresearch/dinov2) vision transformer.

This example first instantiates the DINOv2 model and then proceeds to create DepthAnythingV2 and run it.

## Running an example with color map and CUDA

```bash
cargo run --features cuda,depth_anything_v2 --package candle-examples --example depth_anything_v2 -- --color-map --image candle-examples/examples/yolo-v8/assets/bike.jpg
```
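The two-step construction mentioned above (DINOv2 backbone first, then the Depth Anything V2 head) can be sketched as follows. This is a condensed, hedged adaptation of the example's main.rs; the local safetensors file names are assumptions that mirror the files the example downloads from the hub.

```rust
use candle::{DType, Device, Module, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::depth_anything_v2::{DepthAnythingV2, DepthAnythingV2Config};
use candle_transformers::models::dinov2;

// `image` is assumed to be the normalized [1, 3, 518, 518] tensor produced by
// the preprocessing in main.rs.
fn depth_of(image: &Tensor, device: &Device) -> anyhow::Result<Tensor> {
    // 1. Build the DINOv2 ViT-S backbone.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(&["dinov2_vits14.safetensors"], DType::F32, device)?
    };
    let dinov2 = dinov2::vit_small(vb)?;

    // 2. Wrap it with the Depth Anything V2 head and run the forward pass.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(
            &["depth_anything_v2_vits.safetensors"],
            DType::F32,
            device,
        )?
    };
    let config = DepthAnythingV2Config::vit_small();
    let depth_anything = DepthAnythingV2::new(&dinov2, &config, vb)?;
    Ok(depth_anything.forward(image)?)
}
```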
6
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/mimi/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle::{DType, IndexOp, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::mimi::{Config, Model}; use clap::{Parser, ValueEnum}; use hf_hub::api::sync::Api; mod audio_io; #[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)] enum Action { AudioToAudio, AudioToCode, CodeToAudio, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// The action to be performed, specifies the format for the input and output data. action: Action, /// The input file, either an audio file or some mimi tokens stored as safetensors. in_file: String, /// The output file, either a wave audio file or some mimi tokens stored as safetensors. out_file: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The model weight file, in safetensor format. #[arg(long)] model: Option<String>, /// Whether to use streaming or not, when streaming slices of data of the given size are passed /// to the encoder/decoder one at a time. #[arg(long)] streaming: Option<usize>, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => Api::new()? .model("kyutai/mimi".to_string()) .get("model.safetensors")?, }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let config = Config::v0_1(None); let mut model = Model::new(config, vb)?; let codes = match args.action { Action::CodeToAudio => { let codes = candle::safetensors::load(args.in_file, &device)?; codes.get("codes").expect("no codes in input file").clone() } Action::AudioToCode | Action::AudioToAudio => { let pcm = if args.in_file == "-" { println!(">>>> RECORDING AUDIO, PRESS ENTER ONCE DONE <<<<"); let (stream, input_audio) = audio_io::setup_input_stream()?; let mut pcms = vec![]; let stdin = std::thread::spawn(|| { let mut s = String::new(); std::io::stdin().read_line(&mut s) }); while !stdin.is_finished() { let input = input_audio.lock().unwrap().take_all(); if input.is_empty() { std::thread::sleep(std::time::Duration::from_millis(100)); continue; } pcms.push(input) } drop(stream); pcms.concat() } else { let (pcm, sample_rate) = audio_io::pcm_decode(args.in_file)?; if sample_rate != 24_000 { println!("WARNING: mimi uses a 24khz sample rate, input uses {sample_rate}, resampling..."); audio_io::resample(&pcm, sample_rate as usize, 24_000)? } else { pcm } }; match args.streaming { Some(chunk_size) => { let mut code_chunks = vec![]; for pcm in pcm.chunks(chunk_size) { let pcm = Tensor::new(pcm, &device)?.reshape((1, 1, ()))?; let code_chunk = model.encode(&pcm)?; code_chunks.push(code_chunk) } Tensor::cat(&code_chunks, candle::D::Minus1)? } None => { let pcm_len = pcm.len(); let pcm = Tensor::from_vec(pcm, (1, 1, pcm_len), &device)?; println!("input pcm shape: {:?}", pcm.shape()); model.encode(&pcm)? 
} } } }; println!("codes shape: {:?}", codes.shape()); model.reset_state(); match args.action { Action::AudioToCode => { codes.save_safetensors("codes", &args.out_file)?; } Action::AudioToAudio | Action::CodeToAudio => { let pcm = match args.streaming { Some(chunk_size) => { let seq_len = codes.dim(candle::D::Minus1)?; let mut pcm_chunks = vec![]; for chunk_start in (0..seq_len).step_by(chunk_size) { let chunk_len = usize::min(chunk_size, seq_len - chunk_start); let codes = codes.narrow(candle::D::Minus1, chunk_start, chunk_len)?; let pcm = model.decode_step(&codes.into())?; if let Some(pcm) = pcm.as_option() { pcm_chunks.push(pcm.clone()) } } Tensor::cat(&pcm_chunks, candle::D::Minus1)? } None => model.decode(&codes)?, }; println!("output pcm shape: {:?}", pcm.shape()); let pcm = pcm.i(0)?.i(0)?; let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?; let pcm = pcm.to_vec1::<f32>()?; if args.out_file == "-" { let (stream, ad) = audio_io::setup_output_stream()?; { let mut ad = ad.lock().unwrap(); ad.push_samples(&pcm)?; } loop { let ad = ad.lock().unwrap(); if ad.is_empty() { break; } // That's very weird, calling thread::sleep here triggers the stream to stop // playing (the callback doesn't seem to be called anymore). // std::thread::sleep(std::time::Duration::from_millis(100)); } drop(stream) } else { let mut output = std::fs::File::create(&args.out_file)?; candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24_000)?; } } } Ok(()) }
7
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/mimi/audio_io.rs
use anyhow::{Context, Result}; use std::sync::{Arc, Mutex}; pub const SAMPLE_RATE: usize = 24_000; pub(crate) struct AudioOutputData_ { resampled_data: std::collections::VecDeque<f32>, resampler: rubato::FastFixedIn<f32>, output_buffer: Vec<f32>, input_buffer: Vec<f32>, input_len: usize, } impl AudioOutputData_ { pub(crate) fn new(input_sample_rate: usize, output_sample_rate: usize) -> Result<Self> { use rubato::Resampler; let resampled_data = std::collections::VecDeque::with_capacity(output_sample_rate * 10); let resample_ratio = output_sample_rate as f64 / input_sample_rate as f64; let resampler = rubato::FastFixedIn::new( resample_ratio, f64::max(resample_ratio, 1.0), rubato::PolynomialDegree::Septic, 1024, 1, )?; let input_buffer = resampler.input_buffer_allocate(true).remove(0); let output_buffer = resampler.output_buffer_allocate(true).remove(0); Ok(Self { resampled_data, resampler, input_buffer, output_buffer, input_len: 0, }) } pub fn reset(&mut self) { use rubato::Resampler; self.output_buffer.fill(0.); self.input_buffer.fill(0.); self.resampler.reset(); self.resampled_data.clear(); } pub(crate) fn take_all(&mut self) -> Vec<f32> { let mut data = Vec::with_capacity(self.resampled_data.len()); while let Some(elem) = self.resampled_data.pop_back() { data.push(elem); } data } pub(crate) fn is_empty(&self) -> bool { self.resampled_data.is_empty() } // Assumes that the input buffer is large enough. fn push_input_buffer(&mut self, samples: &[f32]) { self.input_buffer[self.input_len..self.input_len + samples.len()].copy_from_slice(samples); self.input_len += samples.len() } pub(crate) fn push_samples(&mut self, samples: &[f32]) -> Result<()> { use rubato::Resampler; let mut pos_in = 0; loop { let rem = self.input_buffer.len() - self.input_len; let pos_end = usize::min(pos_in + rem, samples.len()); self.push_input_buffer(&samples[pos_in..pos_end]); pos_in = pos_end; if self.input_len < self.input_buffer.len() { break; } let (_, out_len) = self.resampler.process_into_buffer( &[&self.input_buffer], &mut [&mut self.output_buffer], None, )?; for &elem in self.output_buffer[..out_len].iter() { self.resampled_data.push_front(elem) } self.input_len = 0; } Ok(()) } } type AudioOutputData = Arc<Mutex<AudioOutputData_>>; pub(crate) fn setup_output_stream() -> Result<(cpal::Stream, AudioOutputData)> { use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; println!("Setup audio output stream!"); let host = cpal::default_host(); let device = host .default_output_device() .context("no output device available")?; let mut supported_configs_range = device.supported_output_configs()?; let config_range = match supported_configs_range.find(|c| c.channels() == 1) { // On macOS, it's commonly the case that there are only stereo outputs. None => device .supported_output_configs()? 
.next() .context("no audio output available")?, Some(config_range) => config_range, }; let sample_rate = cpal::SampleRate(SAMPLE_RATE as u32).clamp( config_range.min_sample_rate(), config_range.max_sample_rate(), ); let config: cpal::StreamConfig = config_range.with_sample_rate(sample_rate).into(); let channels = config.channels as usize; println!( "cpal device: {} {} {config:?}", device.name().unwrap_or_else(|_| "unk".to_string()), config.sample_rate.0 ); let audio_data = Arc::new(Mutex::new(AudioOutputData_::new( SAMPLE_RATE, config.sample_rate.0 as usize, )?)); let ad = audio_data.clone(); let stream = device.build_output_stream( &config, move |data: &mut [f32], _: &cpal::OutputCallbackInfo| { data.fill(0.); let mut ad = ad.lock().unwrap(); let mut last_elem = 0f32; for (idx, elem) in data.iter_mut().enumerate() { if idx % channels == 0 { match ad.resampled_data.pop_back() { None => break, Some(v) => { last_elem = v; *elem = v } } } else { *elem = last_elem } } }, move |err| eprintln!("cpal error: {err}"), None, // None=blocking, Some(Duration)=timeout )?; stream.play()?; Ok((stream, audio_data)) } pub(crate) fn setup_input_stream() -> Result<(cpal::Stream, AudioOutputData)> { use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; println!("Setup audio input stream!"); let host = cpal::default_host(); let device = host .default_input_device() .context("no input device available")?; let mut supported_configs_range = device.supported_input_configs()?; let config_range = supported_configs_range .find(|c| c.channels() == 1) .context("no audio input available")?; let sample_rate = cpal::SampleRate(SAMPLE_RATE as u32).clamp( config_range.min_sample_rate(), config_range.max_sample_rate(), ); let config: cpal::StreamConfig = config_range.with_sample_rate(sample_rate).into(); println!( "cpal device: {} {} {config:?}", device.name().unwrap_or_else(|_| "unk".to_string()), config.sample_rate.0 ); let audio_data = Arc::new(Mutex::new(AudioOutputData_::new( config.sample_rate.0 as usize, SAMPLE_RATE, )?)); let ad = audio_data.clone(); let stream = device.build_input_stream( &config, move |data: &[f32], _: &cpal::InputCallbackInfo| { let mut ad = ad.lock().unwrap(); if let Err(err) = ad.push_samples(data) { eprintln!("error processing audio input {err:?}") } }, move |err| eprintln!("cpal error: {err}"), None, // None=blocking, Some(Duration)=timeout )?; stream.play()?; Ok((stream, audio_data)) } fn conv<T>(samples: &mut Vec<f32>, data: std::borrow::Cow<symphonia::core::audio::AudioBuffer<T>>) where T: symphonia::core::sample::Sample, f32: symphonia::core::conv::FromSample<T>, { use symphonia::core::audio::Signal; use symphonia::core::conv::FromSample; samples.extend(data.chan(0).iter().map(|v| f32::from_sample(*v))) } pub(crate) fn pcm_decode<P: AsRef<std::path::Path>>(path: P) -> Result<(Vec<f32>, u32)> { use symphonia::core::audio::{AudioBufferRef, Signal}; let src = std::fs::File::open(path)?; let mss = symphonia::core::io::MediaSourceStream::new(Box::new(src), Default::default()); let hint = symphonia::core::probe::Hint::new(); let meta_opts: symphonia::core::meta::MetadataOptions = Default::default(); let fmt_opts: symphonia::core::formats::FormatOptions = Default::default(); let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?; let mut format = probed.format; let track = format .tracks() .iter() .find(|t| t.codec_params.codec != symphonia::core::codecs::CODEC_TYPE_NULL) .expect("no supported audio tracks"); let mut decoder = symphonia::default::get_codecs() 
.make(&track.codec_params, &Default::default()) .expect("unsupported codec"); let track_id = track.id; let sample_rate = track.codec_params.sample_rate.unwrap_or(0); let mut pcm_data = Vec::new(); while let Ok(packet) = format.next_packet() { while !format.metadata().is_latest() { format.metadata().pop(); } if packet.track_id() != track_id { continue; } match decoder.decode(&packet)? { AudioBufferRef::F32(buf) => pcm_data.extend(buf.chan(0)), AudioBufferRef::U8(data) => conv(&mut pcm_data, data), AudioBufferRef::U16(data) => conv(&mut pcm_data, data), AudioBufferRef::U24(data) => conv(&mut pcm_data, data), AudioBufferRef::U32(data) => conv(&mut pcm_data, data), AudioBufferRef::S8(data) => conv(&mut pcm_data, data), AudioBufferRef::S16(data) => conv(&mut pcm_data, data), AudioBufferRef::S24(data) => conv(&mut pcm_data, data), AudioBufferRef::S32(data) => conv(&mut pcm_data, data), AudioBufferRef::F64(data) => conv(&mut pcm_data, data), } } Ok((pcm_data, sample_rate)) } pub(crate) fn resample(pcm_in: &[f32], sr_in: usize, sr_out: usize) -> Result<Vec<f32>> { use rubato::Resampler; let mut pcm_out = Vec::with_capacity((pcm_in.len() as f64 * sr_out as f64 / sr_in as f64) as usize + 1024); let mut resampler = rubato::FftFixedInOut::<f32>::new(sr_in, sr_out, 1024, 1)?; let mut output_buffer = resampler.output_buffer_allocate(true); let mut pos_in = 0; while pos_in + resampler.input_frames_next() < pcm_in.len() { let (in_len, out_len) = resampler.process_into_buffer(&[&pcm_in[pos_in..]], &mut output_buffer, None)?; pos_in += in_len; pcm_out.extend_from_slice(&output_buffer[0][..out_len]); } if pos_in < pcm_in.len() { let (_in_len, out_len) = resampler.process_partial_into_buffer( Some(&[&pcm_in[pos_in..]]), &mut output_buffer, None, )?; pcm_out.extend_from_slice(&output_buffer[0][..out_len]); } Ok(pcm_out) }
8
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/mimi/README.md
# candle-mimi

[Mimi](https://huggingface.co/kyutai/mimi) is a state-of-the-art audio compression model using an encoder/decoder architecture with residual vector quantization. The candle implementation supports streaming, meaning that it's possible to encode or decode a stream of audio tokens on the fly to provide low-latency interaction with an audio model.

## Running one example

Generating some audio tokens from an audio file:

```bash
wget https://github.com/metavoiceio/metavoice-src/raw/main/assets/bria.mp3
cargo run --example mimi --features mimi --release -- audio-to-code bria.mp3 bria.safetensors
```

And decoding the audio tokens back into a sound file:

```bash
cargo run --example mimi --features mimi --release -- code-to-audio bria.safetensors bria.wav
```
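The streaming mode mentioned above simply feeds fixed-size PCM slices through the encoder one at a time and concatenates the resulting code tensors. A condensed sketch of that loop, adapted from the example's main.rs; the chunk size is an arbitrary value chosen for illustration.

```rust
use candle::{Device, Tensor};
use candle_transformers::models::mimi::Model;

// Encode a PCM buffer in fixed-size chunks, mirroring the `--streaming` path of
// the example.
fn encode_streaming(model: &mut Model, pcm: &[f32], device: &Device) -> candle::Result<Tensor> {
    let chunk_size = 1920; // illustrative value only
    let mut code_chunks = vec![];
    for chunk in pcm.chunks(chunk_size) {
        // Shape each slice as [batch = 1, channels = 1, samples].
        let pcm = Tensor::new(chunk, device)?.reshape((1, 1, ()))?;
        code_chunks.push(model.encode(&pcm)?);
    }
    // Concatenate the per-chunk codes along the time dimension.
    Tensor::cat(&code_chunks, candle::D::Minus1)
}
```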
9
0
hf_public_repos/api-inference-community/docker_images/asteroid
hf_public_repos/api-inference-community/docker_images/asteroid/tests/test_api.py
import os
from typing import Dict
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline


# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    # IMPLEMENT_THIS
    # "automatic-speech-recognition": "mysample-ASR",
    # "text-generation": "mysample-gpt2",
    "audio-source-separation": "julien-c/DPRNNTasNet-ks16_WHAM_sepclean",
    "audio-to-audio": "julien-c/DPRNNTasNet-ks16_WHAM_sepclean",
}


ALL_TASKS = {
    "automatic-speech-recognition",
    "audio-source-separation",
    "feature-extraction",
    "image-classification",
    "question-answering",
    "text-generation",
    "text-to-speech",
}


class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                with self.assertRaises(EnvironmentError):
                    get_pipeline(unsupported_task, model_id="XX")
0
0
hf_public_repos/api-inference-community/docker_images/asteroid
hf_public_repos/api-inference-community/docker_images/asteroid/tests/test_api_audio_source_separation.py
import os
from unittest import TestCase, skipIf

from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS


@skipIf(
    "audio-source-separation" not in ALLOWED_TASKS,
    "audio-source-separation not implemented",
)
class AudioSourceSeparationTestCase(TestCase):
    def setUp(self):
        model_id = TESTABLE_MODELS["audio-source-separation"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "audio-source-separation"
        from app.main import app

        self.app = app

    def tearDown(self):
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("sample1.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "audio/flac")
        audio = ffmpeg_read(response.content)
        self.assertEqual(len(audio.shape), 1)
        self.assertGreater(audio.shape[0], 1000)

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        bpayload = self.read("sample1_dual.ogg")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        # `headers` (not `header`) is the correct attribute on the response.
        self.assertEqual(response.headers["content-type"], "audio/wav")
        audio = ffmpeg_read(response.content)
        self.assertEqual(audio.shape, (10,))

    def test_webm_audiofile(self):
        bpayload = self.read("sample1.webm")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "audio/wav")
        audio = ffmpeg_read(response.content)
        self.assertEqual(audio.shape, (10,))
1
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/mindspore/requirements.txt
starlette==0.27.0
api-inference-community==0.0.25
huggingface_hub==0.11.0
tinyms>=0.3.2
2
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/mindspore/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <[email protected]>"

# Add any system dependency here
RUN apt-get update -y && apt-get install libglib2.0-dev libsm6 libxrender1 libgl1-mesa-glx -y

COPY requirements.txt /app
RUN /usr/local/bin/python -m pip install --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt
COPY prestart.sh /app/

# Most DL models are quite large in terms of memory, and using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data

# Necessary in a GPU docker environment.
# The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose,
# rendering the TIMEOUT defined by uvicorn impossible to use correctly,
# so we rename it to UVICORN_TIMEOUT.
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
3
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/mindspore/prestart.sh
python app/main.py
4
0
hf_public_repos/api-inference-community/docker_images/mindspore
hf_public_repos/api-inference-community/docker_images/mindspore/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ImageClassificationPipeline, Pipeline from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "image-classification": ImageClassificationPipeline } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
5
0
hf_public_repos/api-inference-community/docker_images/mindspore/app
hf_public_repos/api-inference-community/docker_images/mindspore/app/pipelines/base.py
from abc import ABC, abstractmethod
from typing import Any, Optional


class Pipeline(ABC):
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    pass
6
0
hf_public_repos/api-inference-community/docker_images/mindspore/app
hf_public_repos/api-inference-community/docker_images/mindspore/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException  # isort:skip

from app.pipelines.image_classification import ImageClassificationPipeline
7
0
hf_public_repos/api-inference-community/docker_images/mindspore/app
hf_public_repos/api-inference-community/docker_images/mindspore/app/pipelines/image_classification.py
import json import os from typing import TYPE_CHECKING, Any, Dict, List import tinyms as ts from app.pipelines import Pipeline from huggingface_hub import snapshot_download from tinyms import Tensor, model, vision from tinyms.primitives import Softmax if TYPE_CHECKING: from PIL import Image ALLOWED_MODEL = { "LeNet5": model.lenet5, "ResNet50": model.resnet50, "MobileNetV2": model.mobilenetv2, } ALLOWED_TRANSFORM = { "mnist": vision.mnist_transform, "cifar10": vision.cifar10_transform, "imagenet2012": vision.imagefolder_transform, } def load_tranform_func(config): dataset = config.get("dataset_transform") if dataset not in ALLOWED_TRANSFORM: raise EnvironmentError( f"Currently doesn't supports dataset {dataset} transform!" ) return ALLOWED_TRANSFORM.get(dataset) def load_config(config_json_file): with open(config_json_file, "r", encoding="utf-8") as reader: config = reader.read() return json.loads(config) def load_model_config_from_hf(model_id): repo_path = snapshot_download(model_id) config_json_file = os.path.join(repo_path, "config.json") if not os.path.exists(config_json_file): raise EnvironmentError( f"The path of the config.json file {config_json_file} doesn't exist!" ) config = load_config(config_json_file) architecture = config.get("architecture") if architecture not in ALLOWED_MODEL: raise EnvironmentError(f"Currently doesn't supports {model} model!") net_func = ALLOWED_MODEL.get(architecture) class_num = config.get("num_classes") net = net_func(class_num=class_num, is_training=False) ms_model = model.Model(net) model_file = os.path.join(repo_path, "mindspore_model.ckpt") if not os.path.exists(model_file): raise EnvironmentError( f"The path of the model file {model_file} doesn't exist!" ) ms_model.load_checkpoint(model_file) return ms_model, config class ImageClassificationPipeline(Pipeline): def __init__(self, model_id: str): self.model, self.config = load_model_config_from_hf(model_id) # Obtain labels self.id2label = self.config.get("id2label") # Get dataset transform function self.tranform_func = load_tranform_func(self.config) # Return at most the top 5 predicted classes self.top_k = 5 def __call__(self, inputs: "Image.Image") -> List[Dict[str, Any]]: """ Args: inputs (:obj:`PIL.Image`): The raw image representation as PIL. No transformation made whatsoever from the input. Make all necessary transformations here. Return: A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82} It is preferred if the returned list is in decreasing `score` order """ # Preprocess data img_data = self.tranform_func(inputs) input_data = ts.array(img_data.tolist(), dtype=img_data.dtype.name) # Execute model prediction preds = self.model.predict(ts.expand_dims(input_data, 0)) # Postprocess data softmax = Softmax() pred_outputs = softmax(Tensor(preds, dtype=ts.float32)).asnumpy() labels = [ {"label": str(self.id2label[str(i)]), "score": float(pred_outputs[0][i])} for i in range(len(pred_outputs[0])) ] return sorted(labels, key=lambda tup: tup["score"], reverse=True)[: self.top_k]
8
0
hf_public_repos/api-inference-community/docker_images/mindspore
hf_public_repos/api-inference-community/docker_images/mindspore/tests/test_docker_build.py
import os
import subprocess
from unittest import TestCase


class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)


class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
9
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/olmo.rs
//! OLMo (Open Language Model) implementation //! //! See OLMo model details at: //! - [Hugging Face](https://huggingface.co/allenai/OLMo) //! - [OLMo Paper](https://allenai.org/olmo) //! //! The model uses: //! - RoPE embeddings //! - Sliding window attention //! - Transformer architecture //! //! References: //! - [Hugging Face Implementation](https://huggingface.co/allenai/OLMo) //! - [OLMo Paper](https://allenai.org/olmo) //! use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{linear_b, linear_no_bias, Activation, LayerNorm, Linear, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub attention_bias: bool, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub hidden_act: candle_nn::Activation, pub max_position_embeddings: usize, pub rope_theta: f64, pub tie_word_embeddings: bool, pub clip_qkv: Option<f64>, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, qkv_clip: Option<f64>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = 
hidden_sz / num_heads; let b = cfg.attention_bias; let qkv_clip = cfg.clip_qkv; let q_proj = linear_b(hidden_sz, num_heads * head_dim, b, vb.pp("q_proj"))?; let k_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("k_proj"))?; let v_proj = linear_b(hidden_sz, num_kv_heads * head_dim, b, vb.pp("v_proj"))?; let o_proj = linear_b(num_heads * head_dim, hidden_sz, b, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, qkv_clip, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let (query_states, key_states, value_states) = match &self.qkv_clip { None => (query_states, key_states, value_states), Some(qkv_clip) => { let query_states = Tensor::clamp(&query_states, -qkv_clip, *qkv_clip)?; let key_states = Tensor::clamp(&key_states, -qkv_clip, *qkv_clip)?; let value_states = Tensor::clamp(&value_states, -qkv_clip, *qkv_clip)?; (query_states, key_states, value_states) } }; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: LayerNorm, post_attention_layernorm: LayerNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let ln_weight = Tensor::ones(cfg.hidden_size, vb.dtype(), vb.device())?; let input_layernorm = LayerNorm::new_no_bias(ln_weight.clone(), 1e-5); let post_attention_layernorm = LayerNorm::new_no_bias(ln_weight.clone(), 1e-5); Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: LayerNorm, lm_head: Linear, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let ln_weight = Tensor::ones(cfg.hidden_size, vb.dtype(), vb.device())?; let norm = LayerNorm::new_no_bias(ln_weight, 1e-5); let lm_head = if cfg.tie_word_embeddings { Linear::new(embed_tokens.embeddings().clone(), None) } else { linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))? }; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
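As a hedged usage sketch for the OLMo `Model` above: a greedy decoding loop that relies only on the `forward(input_ids, seqlen_offset)` and `clear_kv_cache` methods shown in this file. Weight loading and tokenization are elided, and the helper itself is hypothetical rather than part of the crate.

```rust
use candle::{DType, Device, Tensor};
use candle_transformers::models::olmo::Model;

fn greedy_decode(
    model: &mut Model,
    prompt_tokens: &[u32],
    max_new_tokens: usize,
    device: &Device,
) -> candle::Result<Vec<u32>> {
    let mut tokens = prompt_tokens.to_vec();
    model.clear_kv_cache();
    for step in 0..max_new_tokens {
        // First step feeds the whole prompt; later steps feed only the last
        // token, with `seqlen_offset` pointing at the already-cached positions.
        let (ctx, offset) = if step == 0 {
            (&tokens[..], 0)
        } else {
            (&tokens[tokens.len() - 1..], tokens.len() - 1)
        };
        let input = Tensor::new(ctx, device)?.unsqueeze(0)?;
        // The model already narrows to the last position: logits is [1, 1, vocab].
        let logits = model.forward(&input, offset)?;
        let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
        // Greedy: pick the highest-scoring token id.
        let next = logits
            .to_vec1::<f32>()?
            .iter()
            .enumerate()
            .max_by(|a, b| a.1.total_cmp(b.1))
            .map(|(i, _)| i as u32)
            .unwrap();
        tokens.push(next);
    }
    Ok(tokens)
}
```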
0
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/blip.rs
//! Based on the BLIP paper from Salesforce Research. //! //! The blip-image-captioning model can generate captions for an input image. //! //! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-BLIP-Image-Captioning) //! - 💻 [GH Link](https://github.com/salesforce/BLIP) //! - 🤗 [HF Link](https://huggingface.co/Salesforce/blip-image-captioning-base) //! - 📝 [Paper](https://arxiv.org/abs/2201.12086) //! use super::blip_text; use super::with_tracing::{conv2d, linear, Conv2d, Linear}; use candle::{Module, Result, Tensor, D}; use candle_nn::{layer_norm, Conv2dConfig, LayerNorm, VarBuilder}; use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct VisionConfig { pub hidden_size: usize, pub intermediate_size: usize, pub projection_dim: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub image_size: usize, pub patch_size: usize, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, } #[derive(Debug, Clone, Deserialize)] pub struct Config { pub text_config: blip_text::Config, pub vision_config: VisionConfig, pub projection_dim: usize, pub image_text_hidden_size: usize, } impl Config { pub fn image_captioning_large() -> Self { let text_config = blip_text::Config { vocab_size: 30524, hidden_size: 768, encoder_hidden_size: 1024, intermediate_size: 3072, projection_dim: 768, num_hidden_layers: 12, num_attention_heads: 12, max_position_embeddings: 512, hidden_act: candle_nn::Activation::Gelu, layer_norm_eps: 1e-12, is_decoder: true, }; let vision_config = VisionConfig { hidden_size: 1024, intermediate_size: 4096, projection_dim: 512, num_hidden_layers: 24, num_attention_heads: 16, image_size: 384, patch_size: 16, hidden_act: candle_nn::Activation::Gelu, layer_norm_eps: 1e-5, }; Self { text_config, vision_config, projection_dim: 512, image_text_hidden_size: 256, } } } #[derive(Debug, Clone)] struct VisionEmbeddings { class_embedding: Tensor, patch_embedding: Conv2d, position_embedding: Tensor, } impl VisionEmbeddings { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let class_embedding = vb.get((1, 1, cfg.hidden_size), "class_embedding")?; let conv_cfg = Conv2dConfig { stride: cfg.patch_size, ..Default::default() }; let patch_embedding = conv2d( 3, cfg.hidden_size, cfg.patch_size, conv_cfg, vb.pp("patch_embedding"), )?; let num_patches1 = cfg.image_size / cfg.patch_size; let num_patches = num_patches1 * num_patches1; let num_positions = num_patches + 1; let position_embedding = vb.get((1, num_positions, cfg.hidden_size), "position_embedding")?; Ok(Self { class_embedding, patch_embedding, position_embedding, }) } } impl Module for VisionEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let target_dtype = xs.dtype(); let b_size = xs.dim(0)?; let patch_embeds = xs.apply(&self.patch_embedding)?.flatten_from(2)?.t()?; let d = self.class_embedding.dim(D::Minus1)?; let class_embeds = self .class_embedding .broadcast_as((b_size, 1, d))? 
.to_dtype(target_dtype)?; let embeddings = Tensor::cat(&[&class_embeds, &patch_embeds], 1)?; let position_embedding = self.position_embedding.narrow(1, 0, embeddings.dim(1)?)?; embeddings.broadcast_add(&position_embedding) } } #[derive(Debug, Clone)] struct Attention { qkv: Linear, projection: Linear, scale: f64, num_heads: usize, } impl Attention { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = embed_dim / num_heads; let scale = 1f64 / (head_dim as f64).sqrt(); let qkv = linear(embed_dim, 3 * embed_dim, vb.pp("qkv"))?; let projection = linear(embed_dim, embed_dim, vb.pp("projection"))?; Ok(Self { qkv, projection, scale, num_heads, }) } fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> { let (b_sz, tgt_len, embed_dim) = xs.dims3()?; let mixed_qkv = xs .apply(&self.qkv)? .reshape((b_sz, tgt_len, 3, self.num_heads, embed_dim / self.num_heads))? .permute((2, 0, 3, 1, 4))?; let query = mixed_qkv.get(0)?; let key = mixed_qkv.get(1)?; let value = mixed_qkv.get(2)?; let attention_scores = query.matmul(&key.t()?)?; let attention_scores = (attention_scores * self.scale)?; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let attention_probs = match attn_mask { None => attention_probs, Some(attn_mask) => (attention_probs * attn_mask)?, }; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .flatten_from(D::Minus2)? .apply(&self.projection) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { activation_fn: candle_nn::Activation, fc1: Linear, fc2: Linear, } impl MLP { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?; let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?; Ok(Self { activation_fn: cfg.hidden_act, fc1, fc2, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.fc1)? .apply(&self.activation_fn)? .apply(&self.fc2) } } #[derive(Debug, Clone)] struct EncoderLayer { self_attn: Attention, layer_norm1: LayerNorm, mlp: MLP, layer_norm2: LayerNorm, } impl EncoderLayer { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let self_attn = Attention::new(cfg, vb.pp("self_attn"))?; let layer_norm1 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm1"))?; let layer_norm2 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm2"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.layer_norm1)?; let xs = self.self_attn.forward(&xs, attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?; xs + residual } } #[derive(Debug, Clone)] struct Encoder { layers: Vec<EncoderLayer>, } impl Encoder { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb = vb.pp("layers"); for i in 0..cfg.num_hidden_layers { let layer = EncoderLayer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, attention_mask)? 
} Ok(xs) } } #[derive(Debug, Clone)] pub struct VisionModel { embeddings: VisionEmbeddings, encoder: Encoder, post_layernorm: LayerNorm, } impl VisionModel { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let post_layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, post_layernorm, }) } } impl Module for VisionModel { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.embeddings)?; let encoder_outputs = self.encoder.forward(&xs, None)?; // Return the last hidden state rather than pooled outputs. encoder_outputs.apply(&self.post_layernorm) } } #[derive(Debug, Clone)] pub struct BlipForConditionalGeneration { vision_model: VisionModel, text_decoder: blip_text::TextLMHeadModel, } impl BlipForConditionalGeneration { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vision_model = VisionModel::new(&cfg.vision_config, vb.pp("vision_model"))?; let text_decoder = blip_text::TextLMHeadModel::new(&cfg.text_config, vb.pp("text_decoder"))?; Ok(Self { vision_model, text_decoder, }) } pub fn vision_model(&self) -> &VisionModel { &self.vision_model } pub fn text_decoder(&mut self) -> &mut blip_text::TextLMHeadModel { &mut self.text_decoder } pub fn reset_kv_cache(&mut self) { self.text_decoder.reset_kv_cache(); } }
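A minimal, hedged sketch of using the vision tower defined above: `VisionModel` implements `Module`, so encoding an image is a single `forward` call. The caption-generation loop through `text_decoder()` is elided here since it depends on the `blip_text` module, and `pixel_values` is an assumed, already-preprocessed input.

```rust
use candle::{Module, Tensor};
use candle_transformers::models::blip::BlipForConditionalGeneration;

// `pixel_values` is assumed to be a normalized [batch, 3, 384, 384] tensor
// matching the vision config above.
fn image_embeddings(
    model: &BlipForConditionalGeneration,
    pixel_values: &Tensor,
) -> candle::Result<Tensor> {
    // Returns the last hidden state after the post layer-norm, which the text
    // decoder later cross-attends to when generating a caption.
    model.vision_model().forward(pixel_values)
}
```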
1
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/qwen2.rs
//! Qwen2 model implementation with quantization support. //! //! Qwen2 is a large language model from Alibaba optimized for efficiency. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Streaming decode support //! - Grouped query attention (GQA) //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - 🤗 [Qwen2 Model](https://huggingface.co/Qwen/Qwen2-7B) //! use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub max_position_embeddings: usize, pub sliding_window: usize, pub max_window_layers: usize, pub tie_word_embeddings: bool, pub rope_theta: f64, pub rms_norm_eps: f64, pub use_sliding_window: bool, pub hidden_act: Activation, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = 
cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; Ok(Self { embed_tokens, layers, norm, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_causal_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> { let (b_sz, sql_len) = attn_mask.dims2()?; let mut mask: Vec<Tensor> = vec![]; for b in 0..b_sz { mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?); } let mask = Tensor::cat(&mask, 0)?; let on_true = mask.zeros_like()?.to_dtype(self.dtype)?; let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)? .broadcast_as(mask.shape())? 
.to_dtype(self.dtype)?; mask.where_cond(&on_true, &on_false) } pub fn forward( &mut self, input_ids: &Tensor, seqlen_offset: usize, attn_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask: Option<Tensor> = match attn_mask { Some(mask) => Some(self.prepare_attention_mask(mask)?), None => { if seq_len <= 1 { None } else { Some(self.prepare_causal_attention_mask(b_size, seq_len, seqlen_offset)?) } } }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.apply(&self.norm) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } } #[derive(Debug, Clone)] pub struct ModelForCausalLM { base_model: Model, lm_head: Linear, } impl ModelForCausalLM { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let base_model = Model::new(cfg, vb.clone())?; let lm_head = if vb.contains_tensor("lm_head.weight") { linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))? } else { Linear::from_weights(base_model.embed_tokens.embeddings().clone(), None) }; Ok(Self { base_model, lm_head, }) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; self.base_model .forward(input_ids, seqlen_offset, None)? .narrow(1, seq_len - 1, 1)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { self.base_model.clear_kv_cache() } }
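The banded mask built by `prepare_causal_attention_mask` above combines ordinary causality with the configured `sliding_window`. Below is a minimal, standalone sketch of that construction using plain candle tensor ops; `sliding_window_mask`, `main`, and the toy sizes are illustrative names of mine and not part of the model file, and the crate is assumed to be imported as `candle` as elsewhere in this repository.

```rust
// Standalone sketch of the sliding-window causal mask built by
// `prepare_causal_attention_mask` above: position j is visible from position i
// only if it is not in the future (i >= j) and not further back than the
// window (i - j <= window).
use candle::{Device, Result, Tensor};

fn sliding_window_mask(tgt_len: usize, window: usize, dev: &Device) -> Result<Tensor> {
    let mask: Vec<f32> = (0..tgt_len)
        .flat_map(|i| {
            (0..tgt_len).map(move |j| {
                if i < j || j + window < i {
                    f32::NEG_INFINITY
                } else {
                    0.
                }
            })
        })
        .collect();
    Tensor::from_slice(&mask, (tgt_len, tgt_len), dev)
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // 6 query positions, window of 2: each token sees itself and at most the
    // two previous tokens; everything else is -inf before the softmax.
    let mask = sliding_window_mask(6, 2, &dev)?;
    println!("{mask}");
    Ok(())
}
```

With `window` set to the sequence length or more, this reduces to the ordinary causal mask.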
2
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/t5.rs
//! T5 model implementation. //! //! T5 (Text-to-Text Transfer Transformer) is a unified text-to-text transformer model. //! This implementation follows the original model architecture. //! //! Key characteristics: //! - Text-to-text framework //! - Relative positional embeddings //! - T5-specific layer normalization //! - Encoder-decoder architecture //! - Support for sequence-to-sequence tasks //! //! References: //! - ⚡ [Interactive Wasm Example](https://huggingface.co/spaces/radames/Candle-T5-Generation-Wasm) //! - 💻[GH Model](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py) //! - 🤗 [HF Link](https://huggingface.co/docs/transformers/model_doc/t5) //! - 📝 [T5 Paper](https://arxiv.org/abs/1910.10683) //! //! # Encoder-decoder example: //! //! ```bash //! cargo run --example t5 --release -- \ //! --model-id "t5-small" \ //! --prompt "translate to German: A beautiful candle." \ //! --decode //! > ... //! > Eine schöne Kerze. //! > 9 tokens generated (2.42 token/s) //! ``` //! //! Variants such as [flan-t5](https://huggingface.co/google/flan-t5-small), [flan-ul2](https://huggingface.co/google/flan-ul2) (with `--revision "refs/pr/25"`), and [Co-EdIT](https://huggingface.co/grammarly/coedit-large) are also supported. //! //! # Translation with MADLAD //! //! //! [MADLAD-400](https://arxiv.org/abs/2309.04662) is a series of multilingual machine translation T5 models trained on 250 billion tokens covering over 450 languages using publicly available data. These models are competitive with significantly larger models. //! //! ```bash //! cargo run --example t5 --release -- \ //! --model-id "jbochi/madlad400-3b-mt" \ //! --prompt "<2de> How are you, my friend?" \ //! --decode --temperature 0 //! ... //! Wie geht es dir, mein Freund? //! ``` //! //! ## Sentence embedding example //! //! ```bash //! cargo run --example t5 --release -- \ //! --model-id "t5-small" --prompt "A beautiful candle." //! ... //! [[[ 0.0515, -0.0541, -0.0761, ..., -0.0392, 0.1511, -0.0265], //! [-0.0974, 0.0998, -0.1659, ..., -0.2450, 0.1738, -0.0164], //! [ 0.0624, -0.1024, 0.0430, ..., -0.1388, 0.0564, -0.2962], //! [-0.0389, -0.1173, 0.0026, ..., 0.1064, -0.1065, 0.0990], //! [ 0.1300, 0.0027, -0.0326, ..., 0.0026, -0.0317, 0.0851]]] //! Tensor[[1, 5, 512], f32] //! Took 303.766583ms //! 
``` use crate::models::with_tracing::Embedding; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; use std::sync::Arc; #[derive(Debug, Clone)] pub struct Linear { weight: Tensor, span: tracing::Span, } pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> { let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL; let weight = vb.get_with_hints((d2, d1), "weight", init_ws)?; let span = tracing::span!(tracing::Level::TRACE, "linear"); Ok(Linear { weight, span }) } impl Module for Linear { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let weight = self.weight.to_dtype(xs.dtype())?; let w = match *xs.dims() { [b1, b2, _, _] => weight.broadcast_left((b1, b2))?.t()?, [bsize, _, _] => weight.broadcast_left(bsize)?.t()?, _ => weight.t()?, }; xs.matmul(&w) } } fn default_relative_attention_max_distance() -> usize { 128 } fn default_is_decoder() -> bool { false } fn default_use_cache() -> bool { true } fn default_tie_word_embeddings() -> bool { true } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Deserialize, Default, Clone, PartialEq)] pub struct ActivationWithOptionalGating { pub gated: bool, pub activation: candle_nn::Activation, } pub fn deserialize_feed_forward_proj_activation<'de, D>( deserializer: D, ) -> std::result::Result<ActivationWithOptionalGating, D::Error> where D: serde::de::Deserializer<'de>, { match String::deserialize(deserializer)?.as_str() { "gated-gelu" => Ok(ActivationWithOptionalGating { gated: true, activation: candle_nn::Activation::NewGelu, }), "gated-silu" => Ok(ActivationWithOptionalGating { gated: true, activation: candle_nn::Activation::Silu, }), buf => { let activation = serde_plain::from_str(buf).map_err(serde::de::Error::custom)?; Ok(ActivationWithOptionalGating { gated: false, activation, }) } } } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub vocab_size: usize, pub d_model: usize, pub d_kv: usize, pub d_ff: usize, pub num_layers: usize, pub num_decoder_layers: Option<usize>, pub num_heads: usize, pub relative_attention_num_buckets: usize, #[serde(default = "default_relative_attention_max_distance")] pub relative_attention_max_distance: usize, pub dropout_rate: f64, pub layer_norm_epsilon: f64, pub initializer_factor: f64, #[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")] pub feed_forward_proj: ActivationWithOptionalGating, #[serde(default = "default_tie_word_embeddings")] pub tie_word_embeddings: bool, #[serde(default = "default_is_decoder")] pub is_decoder: bool, pub is_encoder_decoder: bool, #[serde(default = "default_use_cache")] pub use_cache: bool, pub pad_token_id: usize, pub eos_token_id: usize, pub decoder_start_token_id: Option<usize>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 32128, d_model: 512, d_kv: 64, d_ff: 2048, num_layers: 6, num_decoder_layers: None, num_heads: 8, relative_attention_num_buckets: 32, relative_attention_max_distance: 128, dropout_rate: 0.1, layer_norm_epsilon: 1e-6, initializer_factor: 1.0, 
feed_forward_proj: ActivationWithOptionalGating { gated: false, activation: Activation::Relu, }, tie_word_embeddings: true, is_decoder: false, is_encoder_decoder: true, use_cache: true, pad_token_id: 0, eos_token_id: 1, decoder_start_token_id: Some(0), } } } impl Config { // https://huggingface.co/facebook/musicgen-small/blob/495da4ad086b3416a27c6187f9239f9fd96f3962/config.json#L184 pub fn musicgen_small() -> Self { Self { d_ff: 3072, d_kv: 64, d_model: 768, dropout_rate: 0.1, eos_token_id: 1, feed_forward_proj: ActivationWithOptionalGating { gated: false, activation: Activation::Relu, }, tie_word_embeddings: true, initializer_factor: 1.0, is_decoder: false, is_encoder_decoder: true, layer_norm_epsilon: 1e-6, num_decoder_layers: Some(12), num_heads: 12, num_layers: 12, pad_token_id: 0, decoder_start_token_id: Some(0), relative_attention_max_distance: 128, relative_attention_num_buckets: 32, use_cache: true, vocab_size: 32128, } } } #[derive(Debug, Clone)] struct T5LayerNorm { weight: Tensor, variance_epsilon: f64, span: tracing::Span, } impl T5LayerNorm { fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(h, "weight")?; Ok(Self { weight, variance_epsilon: eps, span: tracing::span!(tracing::Level::TRACE, "layer-norm"), }) } } impl Module for T5LayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let dtype = xs.dtype(); let xs_f32 = xs.to_dtype(DType::F32)?; // variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?; let xs = xs_f32.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?; let xs = xs.to_dtype(dtype)?; let xs = xs.broadcast_mul(&self.weight.to_dtype(dtype)?)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseActDense { wi: Linear, wo: Linear, act: Activation, span: tracing::Span, } impl T5DenseActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi"))?; let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi, wo, act: Activation::Relu, span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"), }) } } impl Module for T5DenseActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.wi.forward(xs)?; let xs = self.act.forward(&xs)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseGatedActDense { wi_0: Linear, wi_1: Linear, wo: Linear, act: Activation, span: tracing::Span, } impl T5DenseGatedActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi_0 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?; let wi_1 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?; let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi_0, wi_1, wo, act: cfg.feed_forward_proj.activation, span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"), }) } } impl Module for T5DenseGatedActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?; let hidden_linear = self.wi_1.forward(xs)?; let xs = hidden_gelu.broadcast_mul(&hidden_linear)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5LayerFF { dense_act: Option<T5DenseActDense>, gated_dense_act: Option<T5DenseGatedActDense>, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerFF { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let layer_norm = 
T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated { ( None, Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?), ) } else { ( Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?), None, ) }; Ok(Self { dense_act, gated_dense_act, layer_norm, span: tracing::span!(tracing::Level::TRACE, "layer-ff"), }) } } impl Module for T5LayerFF { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = self.layer_norm.forward(xs)?; let ys = match &self.dense_act { Some(dense_act) => dense_act.forward(&ys)?, None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?, }; let xs = (xs + ys)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5Attention { q: Linear, k: Linear, v: Linear, o: Linear, n_heads: usize, d_kv: usize, relative_attention_bias: Option<Embedding>, relative_attention_num_buckets: usize, relative_attention_max_distance: usize, inner_dim: usize, use_cache: bool, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, span_cache: tracing::Span, span_mm: tracing::Span, span_sm: tracing::Span, } impl T5Attention { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let inner_dim = cfg.num_heads * cfg.d_kv; let q = linear_no_bias(cfg.d_model, inner_dim, vb.pp("q"))?; let k = linear_no_bias(cfg.d_model, inner_dim, vb.pp("k"))?; let v = linear_no_bias(cfg.d_model, inner_dim, vb.pp("v"))?; let o = linear_no_bias(inner_dim, cfg.d_model, vb.pp("o"))?; let relative_attention_bias = if has_relative_attention_bias { let emb = Embedding::new( cfg.relative_attention_num_buckets, cfg.num_heads, vb.pp("relative_attention_bias"), )?; Some(emb) } else { None }; Ok(Self { q, k, v, o, n_heads: cfg.num_heads, d_kv: cfg.d_kv, relative_attention_bias, relative_attention_num_buckets: cfg.relative_attention_num_buckets, relative_attention_max_distance: cfg.relative_attention_max_distance, inner_dim, use_cache: cfg.use_cache && decoder, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"), span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"), span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, key_value_states: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { // Performs Self-attention (if key_value_states is None) or attention // over source sentence (provided by key_value_states). let _enter = self.span.enter(); let kv_input = match key_value_states { None => xs, Some(key_value_states) => key_value_states, }; let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?); let kv_len = kv_input.dim(1)?; let q = self.q.forward(xs)?; let k = self.k.forward(kv_input)?; let v = self.v.forward(kv_input)?; let q = q .reshape((b_sz, q_len, self.n_heads, self.d_kv))? .transpose(1, 2)? .contiguous()?; let mut k = k .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; let mut v = v .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; if self.use_cache && key_value_states.is_none() { let _enter = self.span_cache.enter(); if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache { k = Tensor::cat(&[kv_cache_k, &k], 2)?; v = Tensor::cat(&[kv_cache_v, &v], 2)?; }; self.kv_cache = Some((k.clone(), v.clone())); }; let k = k.contiguous()?; let v = v.contiguous()?; // TODO: Use flash_attn. 
let scores = { let _enter = self.span_mm.enter(); q.matmul(&k.t()?)? }; let scores = match mask { None => scores, Some(mask) => masked_fill( &scores, &mask .unsqueeze(0)? .unsqueeze(0)? .repeat((b_sz, self.n_heads))?, f32::NEG_INFINITY, )?, }; let (scores, position_bias) = match position_bias { Some(position_bias) => ( scores.broadcast_add(position_bias)?, Some(position_bias.clone()), ), None => match &self.relative_attention_bias { None => (scores, None), Some(relative_attention_bias) => { // This only handles the bidirectional case. let kv_len = k.dim(2)?; let (q_start, q_end) = match self.use_cache { true => ((kv_len - q_len) as u32, kv_len as u32), false => (0_u32, kv_len as u32), }; let num_buckets = self.relative_attention_num_buckets as u32 / 2; let max_exact = num_buckets / 2; let relative_position = (q_start..q_end) .map(|i| { (0..kv_len as u32) .map(|j| { if i < j { if j - i < max_exact { j - i + num_buckets } else { let b = f32::log( (j - i) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; u32::min( max_exact + num_buckets + b as u32, self.relative_attention_num_buckets as u32 - 1, ) } } else if i - j < max_exact { i - j } else { let b = f32::log( (i - j) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; u32::min(max_exact + b as u32, num_buckets - 1) } }) .collect::<Vec<u32>>() }) .collect::<Vec<Vec<_>>>(); let relative_buckets = Tensor::new(relative_position, q.device())?; let position_bias = relative_attention_bias .forward(&relative_buckets)? .permute((2, 0, 1))? .unsqueeze(0)? .to_dtype(scores.dtype())?; (scores.broadcast_add(&position_bias)?, Some(position_bias)) // TODO: position_bias_masked? } }, }; let attn_weights = { let _enter = self.span_sm.enter(); candle_nn::ops::softmax_last_dim(&scores)? }; let attn_output = attn_weights.matmul(&v)?; let attn_output = attn_output .transpose(1, 2)? 
.reshape((b_sz, q_len, self.inner_dim))?; let attn_output = self.o.forward(&attn_output)?; Ok((attn_output, position_bias)) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct T5LayerSelfAttention { self_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerSelfAttention { fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { self_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "self-attn"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_xs = self.layer_norm.forward(xs)?; let (ys, position_bias) = self.self_attention .forward(&normed_xs, position_bias, None, mask)?; let ys = (xs + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5LayerCrossAttention { cross_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerCrossAttention { fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { cross_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "cross-attn"), }) } fn forward( &mut self, hidden_states: &Tensor, position_bias: Option<&Tensor>, key_value_states: &Tensor, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_hidden_states = self.layer_norm.forward(hidden_states)?; let (ys, position_bias) = self.cross_attention.forward( &normed_hidden_states, position_bias, Some(key_value_states), None, )?; let ys = (hidden_states + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.cross_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5Block { self_attn: T5LayerSelfAttention, cross_attn: Option<T5LayerCrossAttention>, ff: T5LayerFF, span: tracing::Span, } impl T5Block { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let vb = vb.pp("layer"); let self_attn = T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?; let cross_attn = if cfg.is_decoder { Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?) } else { None }; let ff_i = if cross_attn.is_some() { 2 } else { 1 }; let ff = T5LayerFF::load(vb.pp(ff_i.to_string()), cfg)?; Ok(Self { self_attn, cross_attn, ff, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); // TODO: Cache masks let mask = match self.cross_attn.is_some() { true => { let mask_len = xs.dim(1)?; // If the input seq length is 1, no need for a mask, this is also helpful to avoid shape // issues when using the KV cache in the decoder. if mask_len <= 1 { None } else { Some(get_mask(mask_len, xs.device())?) } } false => None, }; let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?; // TODO: clamp for f16? 
if let Some(cross_attn) = &mut self.cross_attn { (xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?; // TODO: clamp for f16? } let xs = self.ff.forward(&xs)?; // TODO: clamp for f16? Ok((xs, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache()); } } #[derive(Debug, Clone)] struct T5Stack { block: Vec<T5Block>, shared: Arc<Embedding>, final_layer_norm: T5LayerNorm, span: tracing::Span, } impl T5Stack { fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> { let block = (0..cfg.num_layers) .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let final_layer_norm = T5LayerNorm::load( cfg.d_model, cfg.layer_norm_epsilon, vb.pp("final_layer_norm"), )?; Ok(Self { block, shared: shared.clone(), final_layer_norm, span: tracing::span!(tracing::Level::TRACE, "stack"), }) } fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { self.forward_dt(input_ids, encoder_hidden_states, None) } fn forward_dt( &mut self, input_ids: &Tensor, encoder_hidden_states: Option<&Tensor>, dtype: Option<DType>, ) -> Result<Tensor> { let _enter = self.span.enter(); let input_embeds = self.shared.as_ref().forward(input_ids)?; let input_embeds = match dtype { None => input_embeds, Some(dtype) => input_embeds.to_dtype(dtype)?, }; let mut hidden_states = input_embeds; let mut position_bias = None; for block in self.block.iter_mut() { (hidden_states, position_bias) = block.forward( &hidden_states, position_bias.as_ref(), encoder_hidden_states, )? } self.final_layer_norm.forward(&hidden_states) } fn clear_kv_cache(&mut self) { self.block.iter_mut().for_each(|b| b.clear_kv_cache()) } } #[derive(Debug, Clone)] pub struct T5EncoderModel { encoder: T5Stack, device: Device, span: tracing::Span, } impl T5EncoderModel { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let shared_vb = if vb.contains_tensor("shared.weight") { vb.pp("shared") } else if vb.contains_tensor("decoder.embed_tokens") { vb.pp("decoder").pp("embed_tokens") } else { vb.pp("encoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?; Ok(Self { encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "encoder"), }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.encoder.forward(input_ids, None) } pub fn forward_dt(&mut self, input_ids: &Tensor, dtype: Option<DType>) -> Result<Tensor> { let _enter = self.span.enter(); self.encoder.forward_dt(input_ids, None, dtype) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct T5ForConditionalGeneration { encoder: T5Stack, decoder: T5Stack, d_model: usize, tie_word_embeddings: bool, lm_head: Option<Linear>, shared: Arc<Embedding>, device: Device, span_decode: tracing::Span, span_decode_head: tracing::Span, } impl T5ForConditionalGeneration { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { assert!(cfg.is_encoder_decoder); let d_model = cfg.d_model; let shared_vb = if vb.contains_tensor("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; 
let shared = Arc::new(shared); let mut encoder_cfg = cfg.clone(); encoder_cfg.is_decoder = false; encoder_cfg.use_cache = false; encoder_cfg.is_encoder_decoder = false; let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?; let mut decoder_cfg = cfg.clone(); decoder_cfg.is_decoder = true; decoder_cfg.is_encoder_decoder = false; decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers); let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?; let tie_word_embeddings = cfg.tie_word_embeddings; let lm_head = if tie_word_embeddings { None } else { Some(linear_no_bias( cfg.d_model, cfg.vocab_size, vb.pp("lm_head"), )?) }; Ok(Self { encoder, decoder, d_model, tie_word_embeddings, lm_head, shared, device: vb.device().clone(), span_decode: tracing::span!(tracing::Level::TRACE, "decode"), span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"), }) } pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> { self.encoder.forward(input_ids, None) } pub fn decode( &mut self, decoder_input_ids: &Tensor, encoder_output: &Tensor, ) -> Result<Tensor> { let _enter = self.span_decode.enter(); let decoder_output = self .decoder .forward(decoder_input_ids, Some(encoder_output))?; let scaling_factor = if self.tie_word_embeddings { // Rescale output before projecting on vocab // See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 (self.d_model as f64).sqrt() } else { 1.0 }; let sequence_output = ((decoder_output .narrow(1, decoder_output.dim(1)? - 1, 1)? .squeeze(1)?) * scaling_factor)?; let output = { let _enter = self.span_decode_head.enter(); match self.lm_head { None => sequence_output.matmul(&self.shared.embeddings().t()?)?, Some(ref lm_head) => lm_head.forward(&sequence_output)?, } }; Ok(output) } pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> { let encoder_output = self.encode(input_ids)?; self.decode(decoder_input_ids, &encoder_output) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache(); self.decoder.clear_kv_cache(); } }
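The `relative_attention_bias` branch of `T5Attention::forward` above computes bucketed relative positions inline. The sketch below mirrors that bidirectional bucketing as a standalone function so the exact/logarithmic split and the capping are easier to see; `relative_position_bucket` is a name of mine, and the constants 32 and 128 are the `relative_attention_num_buckets` / `relative_attention_max_distance` values from `Config::default()` above. This is an illustration of the logic, not the model's code path.

```rust
// Standalone mirror of the bidirectional relative-position bucketing computed
// inline in `T5Attention::forward`. Small distances get their own bucket;
// larger distances are binned logarithmically and capped.
fn relative_position_bucket(i: u32, j: u32, num_buckets: u32, max_distance: u32) -> u32 {
    // Bidirectional case: half the buckets are reserved for "key after query".
    let half = num_buckets / 2;
    let max_exact = half / 2;
    if i < j {
        let d = j - i;
        if d < max_exact {
            d + half
        } else {
            let b = f32::log(d as f32 / max_exact as f32, max_distance as f32 / max_exact as f32)
                * (half - max_exact) as f32;
            u32::min(max_exact + half + b as u32, num_buckets - 1)
        }
    } else {
        let d = i - j;
        if d < max_exact {
            d
        } else {
            let b = f32::log(d as f32 / max_exact as f32, max_distance as f32 / max_exact as f32)
                * (half - max_exact) as f32;
            u32::min(max_exact + b as u32, half - 1)
        }
    }
}

fn main() {
    // 32 buckets and a max distance of 128 are the `Config::default()` values.
    for (i, j) in [(0u32, 0u32), (0, 5), (5, 0), (0, 100), (100, 0)] {
        println!("i={i:>3} j={j:>3} -> bucket {}", relative_position_bucket(i, j, 32, 128));
    }
}
```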
3
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/quantized_stable_lm.rs
//! Module for quantized StableLM implementation. //! //! StableLM is a series of open-source large language models //! optimized for performance and stability. This implementation //! provides quantization support for efficient model deployment. //! //! Key characteristics: //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - [StableLM](https://github.com/Stability-AI/StableLM) //! use crate::quantized_nn::{layer_norm, linear, linear_no_bias, Embedding, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, LayerNorm}; use std::sync::Arc; pub use crate::models::stable_lm::Config; use crate::models::stable_lm::RotaryEmbedding; #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, span: tracing::Span, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, span: tracing::span!(tracing::Level::TRACE, "mlp"), }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_cache: bool, rotary_ndims: usize, span: tracing::Span, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let head_dim = cfg.head_dim(); let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let linear_layer = if cfg.use_qkv_bias { linear } else { linear_no_bias }; let q_proj = linear_layer(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups: cfg.num_kv_groups(), head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, use_cache: cfg.use_cache, rotary_ndims: cfg.rotary_ndims(), span: tracing::span!(tracing::Level::TRACE, "attn"), }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (rot_ndims, pass_ndims) = (self.rotary_ndims, self.head_dim - self.rotary_ndims); let query_rot = query_states.narrow(D::Minus1, 0, rot_ndims)?; let query_pass = query_states.narrow(D::Minus1, rot_ndims, pass_ndims)?; let key_rot = key_states.narrow(D::Minus1, 0, rot_ndims)?; let key_pass = key_states.narrow(D::Minus1, rot_ndims, pass_ndims)?; let (query_rot, key_rot) = self.rotary_emb .apply_rotary_emb_qkv(&query_rot, &key_rot, seqlen_offset)?; let query_states = Tensor::cat(&[query_rot, query_pass], D::Minus1)?.contiguous()?; let key_states = Tensor::cat(&[key_rot, key_pass], D::Minus1)?.contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; if self.use_cache { self.kv_cache = Some((key_states.clone(), value_states.clone())); } let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: LayerNorm, post_attention_layernorm: LayerNorm, span: tracing::Span, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("input_layernorm"), )?; let post_attention_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, norm: LayerNorm, lm_head: Linear, device: Device, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, 
vb_l.pp(layer_idx))?; layers.push(layer) } let norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(DType::F32) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
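A detail worth noting in `Attention::forward` above is the partial rotary embedding: only the first `rotary_ndims` channels of each head receive RoPE, and the remaining channels pass through unchanged before being concatenated back. The sketch below isolates that split-and-recombine step with plain candle ops; `apply_rope` is a stand-in (identity) for the real `RotaryEmbedding::apply_rotary_emb_qkv`, and the tensor sizes are arbitrary.

```rust
// Standalone sketch of the partial-rotary split in `Attention::forward`: only
// the first `rot_ndims` channels of each head go through RoPE, the rest are
// passed through and concatenated back.
use candle::{Device, Result, Tensor, D};

fn apply_rope(xs: &Tensor) -> Result<Tensor> {
    // Stand-in for the real rotary embedding; the point here is the split.
    Ok(xs.clone())
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let (b, heads, seq, head_dim, rot_ndims) = (1, 2, 4, 8, 6);
    let pass_ndims = head_dim - rot_ndims;
    let q = Tensor::rand(0f32, 1f32, (b, heads, seq, head_dim), &dev)?;
    // Split the head dimension into the rotated and pass-through parts.
    let q_rot = q.narrow(D::Minus1, 0, rot_ndims)?;
    let q_pass = q.narrow(D::Minus1, rot_ndims, pass_ndims)?;
    let q_rot = apply_rope(&q_rot)?;
    // Recombine along the last dimension, as the model code does.
    let q = Tensor::cat(&[q_rot, q_pass], D::Minus1)?.contiguous()?;
    assert_eq!(q.dims(), &[b, heads, seq, head_dim]);
    Ok(())
}
```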
4
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/distilbert.rs
//! Implementation of DistilBert, a distilled version of BERT. //! //! See: //! - ["DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter"](https://arxiv.org/abs/1910.01108) //! use super::with_tracing::{layer_norm, linear, LayerNorm, Linear}; use candle::{DType, Device, Result, Tensor}; use candle_nn::{Embedding, Module, VarBuilder}; use serde::Deserialize; pub const DTYPE: DType = DType::F32; fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] #[serde(rename_all = "lowercase")] enum HiddenAct { Gelu, Relu, } struct HiddenActLayer { act: HiddenAct, span: tracing::Span, } impl HiddenActLayer { fn new(act: HiddenAct) -> Self { let span = tracing::span!(tracing::Level::TRACE, "hidden-act"); Self { act, span } } } impl Module for HiddenActLayer { fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> { let _enter = self.span.enter(); match self.act { // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213 HiddenAct::Gelu => xs.gelu(), HiddenAct::Relu => xs.relu(), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)] #[serde(rename_all = "lowercase")] enum PositionEmbeddingType { #[default] Absolute, } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { vocab_size: usize, dim: usize, n_layers: usize, n_heads: usize, hidden_dim: usize, activation: HiddenAct, max_position_embeddings: usize, initializer_range: f64, pad_token_id: usize, #[serde(default)] position_embedding_type: PositionEmbeddingType, #[serde(default)] use_cache: bool, model_type: Option<String>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 30522, dim: 768, n_layers: 12, n_heads: 12, hidden_dim: 3072, activation: HiddenAct::Gelu, max_position_embeddings: 512, initializer_range: 0.02, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Absolute, use_cache: true, model_type: Some("distilbert".to_string()), } } } struct Embeddings { word_embeddings: Embedding, position_embeddings: Embedding, layer_norm: LayerNorm, span: tracing::Span, } impl Embeddings { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let word_embeddings = candle_nn::embedding(config.vocab_size, config.dim, vb.pp("word_embeddings"))?; let position_embeddings = candle_nn::embedding( config.max_position_embeddings, config.dim, vb.pp("position_embeddings"), )?; let layer_norm = layer_norm(config.dim, 1e-12, vb.pp("LayerNorm"))?; Ok(Self { word_embeddings, position_embeddings, layer_norm, span: tracing::span!(tracing::Level::TRACE, "embeddings"), }) } fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_bsize, seq_len) = input_ids.dims2()?; let input_embeddings = self.word_embeddings.forward(input_ids)?; let position_ids = (0..seq_len as u32).collect::<Vec<_>>(); let position_ids = Tensor::new(&position_ids[..], input_ids.device())?; let embeddings = input_embeddings.broadcast_add(&self.position_embeddings.forward(&position_ids)?)?; let embeddings = self.layer_norm.forward(&embeddings)?; Ok(embeddings) } } struct MultiHeadSelfAttention { q_lin: Linear, k_lin: Linear, v_lin: Linear, out_lin: Linear, n_heads: usize, attention_head_size: usize, span: tracing::Span, } impl MultiHeadSelfAttention { fn 
load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention_head_size = config.dim / config.n_heads; let all_head_size = config.n_heads * attention_head_size; let dim = config.dim; let q_lin = linear(dim, all_head_size, vb.pp("q_lin"))?; let v_lin = linear(dim, all_head_size, vb.pp("v_lin"))?; let k_lin = linear(dim, all_head_size, vb.pp("k_lin"))?; let out_lin = linear(all_head_size, dim, vb.pp("out_lin"))?; Ok(Self { q_lin, k_lin, v_lin, out_lin, n_heads: config.n_heads, attention_head_size, span: tracing::span!(tracing::Level::TRACE, "attention"), }) } } impl MultiHeadSelfAttention { fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (bs, q_length, _dim) = hidden_states.dims3()?; let dim_per_head = self.attention_head_size; let q = self.q_lin.forward(hidden_states)?; let k = self.k_lin.forward(hidden_states)?; let v = self.v_lin.forward(hidden_states)?; let q = q .reshape((bs, q_length, self.n_heads, dim_per_head))? .transpose(1, 2)?; let k = k .reshape((bs, q_length, self.n_heads, dim_per_head))? .transpose(1, 2)?; let v = v .reshape((bs, q_length, self.n_heads, dim_per_head))? .transpose(1, 2)?; let q: Tensor = (q / (dim_per_head as f64).sqrt())?; let scores = q.matmul(&k.transpose(2, 3)?.contiguous()?)?; let mask = attention_mask.broadcast_as(scores.shape())?; let scores = masked_fill(&scores.to_dtype(DType::F32)?, &mask, f32::NEG_INFINITY)?; let weights = candle_nn::ops::softmax(&scores, candle::D::Minus1)?; let context = weights.matmul(&v.contiguous()?)?; let context = context .transpose(1, 2)? .reshape((bs, q_length, self.n_heads * dim_per_head))? .contiguous()?; let context = self.out_lin.forward(&context)?; Ok(context) } } #[allow(clippy::upper_case_acronyms)] struct FFN { lin1: Linear, lin2: Linear, activation: HiddenActLayer, span: tracing::Span, } impl FFN { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let lin1 = linear(config.dim, config.hidden_dim, vb.pp("lin1"))?; let lin2 = linear(config.hidden_dim, config.dim, vb.pp("lin2"))?; Ok(Self { lin1, lin2, activation: HiddenActLayer::new(config.activation), span: tracing::span!(tracing::Level::TRACE, "ffn"), }) } } impl Module for FFN { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); hidden_states .apply(&self.lin1)? .apply(&self.activation)? .apply(&self.lin2) } } struct TransformerBlock { attention: MultiHeadSelfAttention, sa_layer_norm: LayerNorm, ffn: FFN, output_layer_norm: LayerNorm, span: tracing::Span, } impl TransformerBlock { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention = MultiHeadSelfAttention::load(vb.pp("attention"), config)?; let sa_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("sa_layer_norm"))?; let ffn = FFN::load(vb.pp("ffn"), config)?; let output_layer_norm = layer_norm(config.dim, 1e-12, vb.pp("output_layer_norm"))?; Ok(Self { attention, sa_layer_norm, ffn, output_layer_norm, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } } impl TransformerBlock { fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let sa_output = self.attention.forward(hidden_states, attention_mask)?; // TODO: Support cross-attention? // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 // TODO: Support something similar to `apply_chunking_to_forward`? 
let sa_output = sa_output.broadcast_add(hidden_states)?; let sa_output = self.sa_layer_norm.forward(&sa_output)?; let ffn_output = self.ffn.forward(&sa_output)?; let ffn_output = (&ffn_output + sa_output)?; let output = self.output_layer_norm.forward(&ffn_output)?; Ok(output) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556 struct Transformer { layers: Vec<TransformerBlock>, span: tracing::Span, } impl Transformer { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let layers = (0..config.n_layers) .map(|index| TransformerBlock::load(vb.pp(format!("layer.{index}")), config)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); Ok(Transformer { layers, span }) } } impl Transformer { fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut hidden_states = hidden_states.clone(); // Use a loop rather than a fold as it's easier to modify when adding debug/... for layer in self.layers.iter() { hidden_states = layer.forward(&hidden_states, attention_mask)?; } Ok(hidden_states) } } pub struct DistilBertModel { embeddings: Embeddings, transformer: Transformer, pub device: Device, span: tracing::Span, } impl DistilBertModel { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let (embeddings, transformer) = match ( Embeddings::load(vb.pp("embeddings"), config), Transformer::load(vb.pp("transformer"), config), ) { (Ok(embeddings), Ok(encoder)) => (embeddings, encoder), (Err(err), _) | (_, Err(err)) => { if let Some(model_type) = &config.model_type { if let (Ok(embeddings), Ok(encoder)) = ( Embeddings::load(vb.pp(format!("{model_type}.embeddings")), config), Transformer::load(vb.pp(format!("{model_type}.transformer")), config), ) { (embeddings, encoder) } else { return Err(err); } } else { return Err(err); } } }; Ok(Self { embeddings, transformer, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } pub fn forward(&self, input_ids: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let embedding_output = self.embeddings.forward(input_ids)?; let sequence_output = self .transformer .forward(&embedding_output, attention_mask)?; Ok(sequence_output) } }
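The `masked_fill` helper at the top of this file is how padding positions are excluded before the attention softmax. Below is a minimal sketch of that pattern, assuming a mask that is nonzero exactly at the positions to ignore; the scores and mask values are toy data, not taken from the model.

```rust
// Standalone sketch of the padding-mask pattern used by `masked_fill` above:
// positions where the mask is nonzero get -inf before the softmax, so they
// receive (numerically) zero attention weight.
use candle::{Device, Result, Tensor};

fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let shape = mask.shape();
    let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
    mask.where_cond(&on_true, on_false)
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // One query over four keys; the last key is a padding token.
    let scores = Tensor::new(&[[1f32, 2., 3., 4.]], &dev)?;
    let pad_mask = Tensor::new(&[[0u8, 0, 0, 1]], &dev)?;
    let masked = masked_fill(&scores, &pad_mask, f32::NEG_INFINITY)?;
    let weights = candle_nn::ops::softmax(&masked, candle::D::Minus1)?;
    println!("{weights}");
    Ok(())
}
```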
5
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/glm4.rs
//! GLM-4 inference implementation. //! //! An open bilingual language model with 130B parameters. //! //! Based on implementation from [ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B) use crate::models::with_tracing::{linear_b as linear, Linear}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug, Clone)] pub struct Config { pub num_layers: usize, pub padded_vocab_size: usize, pub hidden_size: usize, pub ffn_hidden_size: usize, pub kv_channels: usize, pub num_attention_heads: usize, pub seq_length: usize, pub layernorm_epsilon: f64, pub rmsnorm: bool, pub apply_residual_connection_post_layernorm: bool, pub post_layer_norm: bool, pub add_bias_linear: bool, pub add_qkv_bias: bool, pub bias_dropout_fusion: bool, pub multi_query_attention: bool, pub multi_query_group_num: usize, pub apply_query_key_layer_scaling: bool, pub attention_softmax_in_fp32: bool, pub fp32_residual_connection: bool, } impl Config { pub fn glm4() -> Self { Self { num_layers: 40, padded_vocab_size: 151552, hidden_size: 4096, ffn_hidden_size: 13696, kv_channels: 128, num_attention_heads: 32, seq_length: 8192, layernorm_epsilon: 1e-5, rmsnorm: true, apply_residual_connection_post_layernorm: false, post_layer_norm: true, add_bias_linear: false, add_qkv_bias: true, bias_dropout_fusion: true, multi_query_attention: true, multi_query_group_num: 2, apply_query_key_layer_scaling: true, attention_softmax_in_fp32: true, fp32_residual_connection: false, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { cache: Tensor, } impl RotaryEmbedding { fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> { let rotary_dim = cfg.kv_channels; let n_elem = rotary_dim / 2; let inv_freq: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / 10_000f64.powf(i as f64 / n_elem as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)? .to_dtype(dtype)? .reshape((cfg.seq_length, 1))?; let freqs = t.matmul(&inv_freq)?; let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?; Ok(Self { cache }) } fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (seqlen, _b, np, _hn) = xs.dims4()?; let cache = self.cache.narrow(0, seqlen_offset, seqlen)?; let rot_dim = cache.dim(D::Minus2)? * 2; let (xs, xs_pass) = ( xs.narrow(D::Minus1, 0, rot_dim)?, xs.narrow(D::Minus1, rot_dim, rot_dim)?, ); let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?; let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?; let (xshaped0, xshaped1) = ( xshaped.i((.., .., .., .., 0))?, xshaped.i((.., .., .., .., 1))?, ); let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?); let xs_out = Tensor::stack( &[ (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?, (xshaped1.broadcast_mul(&cache0)? 
+ xshaped0.broadcast_mul(&cache1)?)?, ], D::Minus1, )?; let xs_out = xs_out.flatten_from(3)?; Tensor::cat(&[xs_out, xs_pass], D::Minus1) } } #[derive(Debug, Clone)] struct CoreAttention { coeff: Option<f64>, norm_factor: f64, dtype: DType, } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32, dtype: DType) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true.to_dtype(dtype)?, on_false)?; Ok(m) } impl CoreAttention { fn new(layer_number: usize, cfg: &Config, dtype: DType) -> Result<Self> { let norm_factor = (cfg.kv_channels as f64).sqrt(); let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling { let coeff = f64::max(1.0, layer_number as f64); (norm_factor * coeff, Some(coeff)) } else { (norm_factor, None) }; Ok(Self { coeff, norm_factor, dtype, }) } fn forward( &self, query_layer: &Tensor, key_layer: &Tensor, value_layer: &Tensor, attention_mask: &Option<Tensor>, ) -> Result<Tensor> { let output_size = ( query_layer.dim(1)?, // b query_layer.dim(2)?, // np query_layer.dim(0)?, // sq key_layer.dim(0)?, // sk ); let query_layer = query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?; let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?; let matmul_result = Tensor::matmul( &query_layer.transpose(0, 1)?.contiguous()?, &key_layer.transpose(0, 1)?.transpose(1, 2)?.contiguous()?, )?; let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?; let matmul_result = match self.coeff { None => matmul_result, Some(coeff) => (matmul_result * coeff)?, }; let attention_scores = match attention_mask { Some(mask) => masked_fill( &matmul_result, &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?, f32::NEG_INFINITY, self.dtype, )?, None => matmul_result, }; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let output_size = ( value_layer.dim(1)?, value_layer.dim(2)?, query_layer.dim(0)?, value_layer.dim(3)?, ); let value_layer = value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?; let attention_probs = attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?; let context_layer = Tensor::matmul( &attention_probs.contiguous()?, &value_layer.transpose(0, 1)?.contiguous()?, )?; let context_layer = context_layer.reshape(output_size)?; let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?; context_layer.flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct SelfAttention { query_key_value: Linear, core_attention: CoreAttention, dense: Linear, multi_query_attention: bool, num_attention_heads_per_partition: usize, num_multi_query_groups_per_partition: usize, hidden_size_per_attention_head: usize, kv_cache: Option<(Tensor, Tensor)>, } impl SelfAttention { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let projection_size = cfg.kv_channels * cfg.num_attention_heads; let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads; let qkv_hidden_size = if cfg.multi_query_attention { projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num } else { 3 * projection_size }; let query_key_value = linear( cfg.hidden_size, qkv_hidden_size, cfg.add_bias_linear || cfg.add_qkv_bias, vb.pp("query_key_value"), )?; let core_attention = CoreAttention::new(layer_number, cfg, vb.dtype())?; let dense = linear( cfg.hidden_size, cfg.hidden_size, 
cfg.add_bias_linear, vb.pp("dense"), )?; Ok(Self { query_key_value, core_attention, dense, multi_query_attention: cfg.multi_query_attention, num_attention_heads_per_partition: cfg.num_attention_heads, num_multi_query_groups_per_partition: cfg.multi_query_group_num, hidden_size_per_attention_head: cfg.kv_channels, kv_cache: None, }) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let mixed_x_layer = xs.apply(&self.query_key_value)?; if !self.multi_query_attention { candle::bail!("only multi_query_attention=true is supported") } let hpa = self.hidden_size_per_attention_head; let query_layer = mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?; let key_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let value_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa + self.num_multi_query_groups_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let query_layer = query_layer.reshape(( query_layer.dim(0)?, query_layer.dim(1)?, self.num_attention_heads_per_partition, hpa, ))?; let key_layer = key_layer.reshape(( key_layer.dim(0)?, key_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; let value_layer = value_layer.reshape(( value_layer.dim(0)?, value_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; // Rotary embeddings. let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(0)?, }; let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?; let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?; // KV cache. let (key_layer, value_layer) = match &self.kv_cache { None => (key_layer, value_layer), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key_layer], 0)?; let v = Tensor::cat(&[prev_v, &value_layer], 0)?; (k, v) } }; self.kv_cache = Some((key_layer.clone(), value_layer.clone())); // Repeat KV. let ratio = self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition; let key_layer = { let (d0, d1, d2, d3) = key_layer.dims4()?; key_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let value_layer = { let (d0, d1, d2, d3) = value_layer.dims4()?; value_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let context_layer = self.core_attention .forward(&query_layer, &key_layer, &value_layer, attention_mask)?; let output = context_layer.apply(&self.dense)?; Ok(output) } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] struct MLP { dense_h_to_4h: Linear, dense_4h_to_h: Linear, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense_h_to_4h = linear( cfg.hidden_size, cfg.ffn_hidden_size * 2, cfg.add_bias_linear, vb.pp("dense_h_to_4h"), )?; let dense_4h_to_h = linear( cfg.ffn_hidden_size, cfg.hidden_size, cfg.add_bias_linear, vb.pp("dense_4h_to_h"), )?; Ok(Self { dense_4h_to_h, dense_h_to_4h, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense_h_to_4h)? .apply(&candle_nn::Activation::Swiglu)? 
.apply(&self.dense_4h_to_h) } } #[derive(Debug, Clone)] struct Block { input_layernorm: candle_nn::LayerNorm, self_attention: SelfAttention, post_attention_layernorm: candle_nn::LayerNorm, mlp: MLP, apply_residual_connection_post_layernorm: bool, } impl Block { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let input_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? }; let post_attention_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? }; let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { input_layernorm, self_attention, post_attention_layernorm, mlp, apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm, }) } fn reset_kv_cache(&mut self) { self.self_attention.reset_kv_cache() } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let layernorm_output = xs.apply(&self.input_layernorm)?; let attention_output = self.self_attention .forward(&layernorm_output, attention_mask, rotary_emb)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { xs }; let layernorm_input = (residual + attention_output)?; let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?; let mlp_output = layernorm_output.apply(&self.mlp)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { &layernorm_input }; mlp_output + residual } } #[derive(Debug, Clone)] struct Transformer { layers: Vec<Block>, final_layernorm: Option<candle_nn::LayerNorm>, rotary_emb: RotaryEmbedding, } impl Transformer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(cfg.num_layers); for layer_index in 0..cfg.num_layers { let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?; layers.push(block) } let final_layernorm = if cfg.post_layer_norm { let ln = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? }; Some(ln) } else { None }; let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?; Ok(Self { layers, final_layernorm, rotary_emb, }) } fn reset_kv_cache(&mut self) { for block in self.layers.iter_mut() { block.reset_kv_cache() } } fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for block in self.layers.iter_mut() { xs = block.forward(&xs, attention_mask, &self.rotary_emb)? 
} match self.final_layernorm.as_ref() { None => Ok(xs), Some(ln) => xs.apply(ln), } } } #[derive(Debug, Clone)] struct Embedding { word_embeddings: candle_nn::Embedding, fp32_residual_connection: bool, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let word_embeddings = candle_nn::embedding( cfg.padded_vocab_size, cfg.hidden_size, vb.pp("word_embeddings"), )?; Ok(Self { word_embeddings, fp32_residual_connection: cfg.fp32_residual_connection, }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h if self.fp32_residual_connection { xs.to_dtype(candle::DType::F32) } else { xs.contiguous() } } } #[derive(Debug, Clone)] pub struct Model { embedding: Embedding, encoder: Transformer, output_layer: Linear, } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embedding"))?; let encoder = Transformer::new(cfg, vb.pp("encoder"))?; let output_layer = linear( cfg.hidden_size, cfg.padded_vocab_size, false, vb.pp("output_layer"), )?; Ok(Self { embedding, encoder, output_layer, }) } pub fn reset_kv_cache(&mut self) { self.encoder.reset_kv_cache() } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = xs.dims2()?; let input_embeds = xs.apply(&self.embedding)?; let attention_mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.device())?) }; let xs = self.encoder.forward(&input_embeds, &attention_mask)?; let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?; Ok(lm_logits) } }
6
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/eva2.rs
//! EVA-2 inference implementation. //! //! EVA-02 is a computer vision model that can be used as an ImageNet classifier. //! The model returns the probability for an image to belong to each of the 1000 //! ImageNet categories. //! //! - [Paper](https://arxiv.org/abs/2303.11331). EVA-02: A Visual Representation for Neon Genesis //! - [Code](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/eva2.py) //! //! # Example //! //! ```bash //! cargo run \ //! --example eva2 \ //! --release -- \ //! --image candle-examples/examples/yolo-v8/assets/bike.jpg //! //! > mountain bike, all-terrain bike, off-roader: 37.09% //! > maillot : 8.30% //! > alp : 2.13% //! > bicycle-built-for-two, tandem bicycle, tandem: 0.84% //! > crash helmet : 0.73% //! ``` //! //! <div align=center> //! <img src="https://github.com/huggingface/candle/raw/main/candle-examples/examples/yolo-v8/assets/bike.jpg" alt="" width=640> //! </div> //! use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 448; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 1000; fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { q: Linear, k: Linear, v: Linear, proj: Linear, rot_pos_embed: Tensor, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, rot_pos_embed: &Tensor, ) -> Result<Self> { let q = linear(vb.pp("q_proj"), dim, dim, qkv_bias)?; let k = linear(vb.pp("k_proj"), dim, dim, false)?; // no bias for Key let v = linear(vb.pp("v_proj"), dim, dim, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let rot_pos_embed = rot_pos_embed.clone(); let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { q, k, v, proj, rot_pos_embed, num_heads, scale, }) } } impl Attention { // See: https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/pos_embed_sincos.py#L210 fn apply_rot_embed_cat(x: &Tensor, emb: &Tensor) -> Result<Tensor> { let cos_emb = emb.i((0.., 64..128))?; //.transpose(0, 1)?; let sin_emb = emb.i((0.., 0..64))?; //.transpose(0, 1)?; let index_even: [u32; 32] = (0u32..=63) .step_by(2) .collect::<Vec<_>>() .try_into() .expect("wrong size iterator"); let index_odd: [u32; 32] = (1u32..=63) .step_by(2) .collect::<Vec<_>>() .try_into() .expect("wrong size iterator"); let t_index_even = Tensor::new(&index_even, x.device())?; let t_index_odd = Tensor::new(&index_odd, x.device())?; let x_c = x.contiguous()?; let rot_x_even = x_c.index_select(&t_index_even, D::Minus1)?; let rot_x_odd_minus = (-1.0 * x_c.index_select(&t_index_odd, D::Minus1)?)?; let rot_x = Tensor::stack(&[&rot_x_odd_minus, &rot_x_even], D::Minus1)?.reshape(x.shape())?; x.broadcast_mul(&cos_emb)? + rot_x.broadcast_mul(&sin_emb)? } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = Tensor::cat( &[ &self.q.forward(xs)?, &self.k.forward(xs)?, &self.v.forward(xs)?, ], 2, )? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? 
// 20134 .transpose(2, 3)?; // 20314 let q = qkv.i(0)?; let k = qkv.i(1)?.contiguous()?; let v = qkv.i(2)?.contiguous()?; let npt = 1; // num_prefix_tokens = 1 for CLS token let q = Tensor::cat( &[ &q.i((0.., 0.., ..npt, 0..))?, &Self::apply_rot_embed_cat(&q.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?, ], 2, )?; let k = Tensor::cat( &[ &k.i((0.., 0.., ..npt, 0..))?, &Self::apply_rot_embed_cat(&k.i((0.., 0.., npt.., 0..))?, &self.rot_pos_embed)?, ], 2, )?; let q = (q * self.scale)?; let attn = &q.matmul(&k.t()?)?; let attn = candle_nn::ops::softmax(attn, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct Mlp { fc1_g: Linear, fc1_x: Linear, norm: LayerNorm, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1_g = linear(vb.pp("fc1_g"), in_features, hidden_features, bias)?; let fc1_x = linear(vb.pp("fc1_x"), in_features, hidden_features, bias)?; let norm = layer_norm(hidden_features, 1e-6, vb.pp("norm"))?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1_g, fc1_x, norm, fc2, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs_g = self.fc1_g.forward(xs)?.silu()?; let xs = self.fc1_x.forward(xs)?; let xs = self.norm.forward(&(xs_g.mul(&xs)?))?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, norm2: LayerNorm, mlp: Mlp, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize, rot_pos_embed: &Tensor) -> Result<Self> { let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true, rot_pos_embed)?; let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?; let hidden_dim = dim * 4 * 2 / 3; // 768 * 4 * 2 / 3 = 3072 * 2 / 3 = 2048 let mlp = Mlp::new(vb.pp("mlp"), dim, hidden_dim, true)?; Ok(Self { norm1, attn, norm2, mlp, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = &self.attn.forward(&self.norm1.forward(xs)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = &self.mlp.forward(&self.norm2.forward(&xs)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / patch_size); Ok(Self { proj, patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. 
xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct EVA2VisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl EVA2VisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let pos_embed = vb.get((1, patch_embed.num_patches + 1, embed_dim), "pos_embed")?; let rot_pos_embed = vb.get((patch_embed.num_patches, 128), "rot_pos_embed")?; let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads, &rot_pos_embed)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding( &self, xs: &Tensor, w: usize, h: usize, num_prefix_tokens: usize, ) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(self.pos_embed.clone()); } // Interpolate only local tokens, i.e. those after the CLS token let prefix_tokens_pos_embed = self.pos_embed.i((0.., ..num_prefix_tokens, 0..))?.clone(); let patch_pos_embed = &self.pos_embed.i((0.., num_prefix_tokens.., 0..))?; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); let patch_pos_embed = patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim))?; Tensor::cat(&[&prefix_tokens_pos_embed, &patch_pos_embed], 1) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; if (w != IMG_SIZE) || (h != IMG_SIZE) { panic!("Error: The input tensor should have the shape: Bx3x518x518."); } let xs = self.patch_embed.forward(xs)?; let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?; let xs = (&xs + &self.interpolate_pos_encoding(&xs, w, h, 1)?)?; Ok(xs) } fn get_intermediate_layers_not_chunked( &self, xs: &Tensor, blocks_to_take: &[usize], ) -> Result<Vec<Tensor>> { let mut xs = self.prepare_tokens_with_mask(xs)?; let mut output = Vec::new(); for (i, blk) in self.blocks.iter().enumerate() { xs = blk.forward(&xs)?; if blocks_to_take.contains(&i) { output.push(xs.clone()); } } if output.len() != blocks_to_take.len() { candle::bail!( "only {} / {} blocks found", output.len(), blocks_to_take.len() ); } Ok(output) } pub fn get_intermediate_layers( &self, xs: &Tensor, blocks_to_take: &[usize], reshape: bool, return_class_token: bool, norm: bool, ) -> Result<Tensor> { let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?; let outputs = if norm { outputs .iter() .map(|out| self.norm.forward(out)) .collect::<Result<Vec<_>>>()? 
} else { outputs }; let class_tokens = outputs .iter() .map(|out| out.i((.., 0))) .collect::<Result<Vec<_>>>()?; let outputs = outputs .iter() .map(|out| out.i((.., 1..))) .collect::<Result<Vec<_>>>()?; let outputs = if reshape { let (b, _c, w, h) = xs.dims4()?; let patch_size = self.patch_embed.patch_size.0; let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size)); outputs .iter() .map(|out| { out.reshape((b, w / patch_size, h / patch_size, num_channels))? .transpose(2, 3)? .transpose(1, 2) }) .collect::<Result<Vec<_>>>()? } else { outputs }; let outputs = if return_class_token { outputs .iter() .zip(class_tokens.iter()) .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1)) .collect::<Result<Vec<_>>>()? } else { outputs }; Tensor::stack(&outputs[..], 0) } } impl Module for EVA2VisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs_moy_local_tokens = xs.i((.., 1..))?.mean(1)?; let xs_norm = self.norm.forward(&xs_moy_local_tokens)?; self.head.forward(&xs_norm) } } pub fn vit_base(vb: VarBuilder) -> Result<EVA2VisionTransformer> { EVA2VisionTransformer::new(vb, 12, 768, 12) } pub fn vit_large(vb: VarBuilder) -> Result<EVA2VisionTransformer> { EVA2VisionTransformer::new(vb, 24, 1024, 16) }
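// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the upstream file): how one of the
// constructors above might be wired up for a single forward pass. The
// safetensors file name and the dummy input are assumptions of this sketch;
// a real caller would load and normalize an actual 448x448 image as done in
// the `eva2` example binary, and the checkpoint layout may require a
// different VarBuilder prefix.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn example_vit_base_forward(device: &candle::Device) -> Result<Tensor> {
    // Memory-map the checkpoint; `from_mmaped_safetensors` is unsafe because
    // the underlying file must not be modified while it is mapped.
    let vb = unsafe {
        VarBuilder::from_mmaped_safetensors(
            &["eva02_base_patch14_448.safetensors"], // assumed file name
            candle::DType::F32,
            device,
        )?
    };
    let model = vit_base(vb)?;
    // Dummy (batch, channels, height, width) input matching IMG_SIZE.
    let input = Tensor::zeros((1, 3, IMG_SIZE, IMG_SIZE), candle::DType::F32, device)?;
    let logits = model.forward(&input)?;
    // Turn the 1000 ImageNet logits into probabilities.
    candle_nn::ops::softmax(&logits, D::Minus1)
}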
7
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/chatglm.rs
//! Implementation of the ChatGLM2/3 models from THUDM. //! //! - 💻 [Github](https://github.com/THUDM/ChatGLM3) ChatGLM3: Advancing Multilingual Conversational Language Models with High-Quality Data //! - 💻 [Github](https://github.com/THUDM/ChatGLM2-6B) ChatGLM2-6B. //! use crate::models::with_tracing::{linear_b as linear, Linear}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::VarBuilder; #[derive(Debug, Clone)] pub struct Config { pub num_layers: usize, pub padded_vocab_size: usize, pub hidden_size: usize, pub ffn_hidden_size: usize, pub kv_channels: usize, pub num_attention_heads: usize, pub seq_length: usize, pub layernorm_epsilon: f64, pub rmsnorm: bool, pub apply_residual_connection_post_layernorm: bool, pub post_layer_norm: bool, pub add_bias_linear: bool, pub add_qkv_bias: bool, pub bias_dropout_fusion: bool, pub multi_query_attention: bool, pub multi_query_group_num: usize, pub apply_query_key_layer_scaling: bool, pub attention_softmax_in_fp32: bool, pub fp32_residual_connection: bool, } impl Config { pub fn glm3_6b() -> Self { Self { num_layers: 28, padded_vocab_size: 65024, hidden_size: 4096, ffn_hidden_size: 13696, kv_channels: 128, num_attention_heads: 32, seq_length: 8192, layernorm_epsilon: 1e-5, rmsnorm: true, apply_residual_connection_post_layernorm: false, post_layer_norm: true, add_bias_linear: false, add_qkv_bias: true, bias_dropout_fusion: true, multi_query_attention: true, multi_query_group_num: 2, apply_query_key_layer_scaling: true, attention_softmax_in_fp32: true, fp32_residual_connection: false, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { cache: Tensor, } impl RotaryEmbedding { fn new(cfg: &Config, dtype: DType, dev: &Device) -> Result<Self> { let rotary_dim = cfg.kv_channels; let n_elem = rotary_dim / 2; let inv_freq: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / 10_000f64.powf(i as f64 / n_elem as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, cfg.seq_length as u32, dev)? .to_dtype(dtype)? .reshape((cfg.seq_length, 1))?; let freqs = t.matmul(&inv_freq)?; let cache = Tensor::stack(&[&freqs.cos()?, &freqs.sin()?], D::Minus1)?; Ok(Self { cache }) } fn apply(&self, xs: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (seqlen, _b, np, _hn) = xs.dims4()?; let cache = self.cache.narrow(0, seqlen_offset, seqlen)?; let rot_dim = cache.dim(D::Minus2)? * 2; let (xs, xs_pass) = ( xs.narrow(D::Minus1, 0, rot_dim)?, xs.narrow(D::Minus1, rot_dim, rot_dim)?, ); let xshaped = xs.reshape((seqlen, (), np, rot_dim / 2, 2))?; let cache = cache.reshape((seqlen, (), 1, rot_dim / 2, 2))?; let (xshaped0, xshaped1) = ( xshaped.i((.., .., .., .., 0))?, xshaped.i((.., .., .., .., 1))?, ); let (cache0, cache1) = (cache.i((.., .., .., .., 0))?, cache.i((.., .., .., .., 1))?); let xs_out = Tensor::stack( &[ (xshaped0.broadcast_mul(&cache0)? - xshaped1.broadcast_mul(&cache1)?)?, (xshaped1.broadcast_mul(&cache0)? 
+ xshaped0.broadcast_mul(&cache1)?)?, ], D::Minus1, )?; let xs_out = xs_out.flatten_from(3)?; Tensor::cat(&[xs_out, xs_pass], D::Minus1) } } #[derive(Debug, Clone)] struct CoreAttention { coeff: Option<f64>, norm_factor: f64, } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } impl CoreAttention { fn new(layer_number: usize, cfg: &Config) -> Result<Self> { let norm_factor = (cfg.kv_channels as f64).sqrt(); let (norm_factor, coeff) = if cfg.apply_query_key_layer_scaling { let coeff = f64::max(1.0, layer_number as f64); (norm_factor * coeff, Some(coeff)) } else { (norm_factor, None) }; Ok(Self { coeff, norm_factor }) } fn forward( &self, query_layer: &Tensor, key_layer: &Tensor, value_layer: &Tensor, attention_mask: &Option<Tensor>, ) -> Result<Tensor> { let output_size = ( query_layer.dim(1)?, // b query_layer.dim(2)?, // np query_layer.dim(0)?, // sq key_layer.dim(0)?, // sk ); let query_layer = query_layer.reshape((output_size.2, output_size.0 * output_size.1, ()))?; let key_layer = key_layer.reshape((output_size.3, output_size.0 * output_size.1, ()))?; let matmul_result = Tensor::matmul( &query_layer.transpose(0, 1)?, &key_layer.transpose(0, 1)?.transpose(1, 2)?, )?; let matmul_result = (matmul_result / self.norm_factor)?.reshape(output_size)?; let matmul_result = match self.coeff { None => matmul_result, Some(coeff) => (matmul_result * coeff)?, }; let attention_scores = match attention_mask { Some(mask) => masked_fill( &matmul_result, &mask.broadcast_left((matmul_result.dim(0)?, matmul_result.dim(1)?))?, f32::NEG_INFINITY, )?, None => matmul_result, }; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let output_size = ( value_layer.dim(1)?, value_layer.dim(2)?, query_layer.dim(0)?, value_layer.dim(3)?, ); let value_layer = value_layer.reshape((value_layer.dim(0)?, output_size.0 * output_size.1, ()))?; let attention_probs = attention_probs.reshape((output_size.0 * output_size.1, output_size.2, ()))?; let context_layer = Tensor::matmul(&attention_probs, &value_layer.transpose(0, 1)?)?; let context_layer = context_layer.reshape(output_size)?; let context_layer = context_layer.permute((2, 0, 1, 3))?.contiguous()?; context_layer.flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct SelfAttention { query_key_value: Linear, core_attention: CoreAttention, dense: Linear, multi_query_attention: bool, num_attention_heads_per_partition: usize, num_multi_query_groups_per_partition: usize, hidden_size_per_attention_head: usize, kv_cache: Option<(Tensor, Tensor)>, } impl SelfAttention { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let projection_size = cfg.kv_channels * cfg.num_attention_heads; let hidden_size_per_attention_head = projection_size / cfg.num_attention_heads; let qkv_hidden_size = if cfg.multi_query_attention { projection_size + 2 * hidden_size_per_attention_head * cfg.multi_query_group_num } else { 3 * projection_size }; let query_key_value = linear( cfg.hidden_size, qkv_hidden_size, cfg.add_bias_linear || cfg.add_qkv_bias, vb.pp("query_key_value"), )?; let core_attention = CoreAttention::new(layer_number, cfg)?; let dense = linear( cfg.hidden_size, cfg.hidden_size, cfg.add_bias_linear, vb.pp("dense"), )?; Ok(Self { query_key_value, core_attention, dense, multi_query_attention: cfg.multi_query_attention, 
num_attention_heads_per_partition: cfg.num_attention_heads, num_multi_query_groups_per_partition: cfg.multi_query_group_num, hidden_size_per_attention_head: cfg.kv_channels, kv_cache: None, }) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let mixed_x_layer = xs.apply(&self.query_key_value)?; if !self.multi_query_attention { candle::bail!("only multi_query_attention=true is supported") } let hpa = self.hidden_size_per_attention_head; let query_layer = mixed_x_layer.narrow(D::Minus1, 0, self.num_attention_heads_per_partition * hpa)?; let key_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let value_layer = mixed_x_layer.narrow( D::Minus1, self.num_attention_heads_per_partition * hpa + self.num_multi_query_groups_per_partition * hpa, self.num_multi_query_groups_per_partition * hpa, )?; let query_layer = query_layer.reshape(( query_layer.dim(0)?, query_layer.dim(1)?, self.num_attention_heads_per_partition, hpa, ))?; let key_layer = key_layer.reshape(( key_layer.dim(0)?, key_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; let value_layer = value_layer.reshape(( value_layer.dim(0)?, value_layer.dim(1)?, self.num_multi_query_groups_per_partition, hpa, ))?; // Rotary embeddings. let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(0)?, }; let query_layer = rotary_emb.apply(&query_layer, seqlen_offset)?; let key_layer = rotary_emb.apply(&key_layer, seqlen_offset)?; // KV cache. let (key_layer, value_layer) = match &self.kv_cache { None => (key_layer, value_layer), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &key_layer], 0)?; let v = Tensor::cat(&[prev_v, &value_layer], 0)?; (k, v) } }; self.kv_cache = Some((key_layer.clone(), value_layer.clone())); // Repeat KV. let ratio = self.num_attention_heads_per_partition / self.num_multi_query_groups_per_partition; let key_layer = { let (d0, d1, d2, d3) = key_layer.dims4()?; key_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let value_layer = { let (d0, d1, d2, d3) = value_layer.dims4()?; value_layer .unsqueeze(D::Minus2)? .expand((d0, d1, d2, ratio, d3))? .reshape(( d0, d1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ))? }; let context_layer = self.core_attention .forward(&query_layer, &key_layer, &value_layer, attention_mask)?; let output = context_layer.apply(&self.dense)?; Ok(output) } } #[allow(clippy::upper_case_acronyms)] #[derive(Debug, Clone)] struct MLP { dense_h_to_4h: Linear, dense_4h_to_h: Linear, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense_h_to_4h = linear( cfg.hidden_size, cfg.ffn_hidden_size * 2, cfg.add_bias_linear, vb.pp("dense_h_to_4h"), )?; let dense_4h_to_h = linear( cfg.ffn_hidden_size, cfg.hidden_size, cfg.add_bias_linear, vb.pp("dense_4h_to_h"), )?; Ok(Self { dense_4h_to_h, dense_h_to_4h, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense_h_to_4h)? .apply(&candle_nn::Activation::Swiglu)? 
.apply(&self.dense_4h_to_h) } } #[derive(Debug, Clone)] struct Block { input_layernorm: candle_nn::LayerNorm, self_attention: SelfAttention, post_attention_layernorm: candle_nn::LayerNorm, mlp: MLP, apply_residual_connection_post_layernorm: bool, } impl Block { fn new(layer_number: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> { let input_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("input_layernorm"), )? }; let post_attention_layernorm = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("post_attention_layernorm"), )? }; let self_attention = SelfAttention::new(layer_number, cfg, vb.pp("self_attention"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { input_layernorm, self_attention, post_attention_layernorm, mlp, apply_residual_connection_post_layernorm: cfg.apply_residual_connection_post_layernorm, }) } fn reset_kv_cache(&mut self) { self.self_attention.reset_kv_cache() } fn forward( &mut self, xs: &Tensor, attention_mask: &Option<Tensor>, rotary_emb: &RotaryEmbedding, ) -> Result<Tensor> { let layernorm_output = xs.apply(&self.input_layernorm)?; let attention_output = self.self_attention .forward(&layernorm_output, attention_mask, rotary_emb)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { xs }; let layernorm_input = (residual + attention_output)?; let layernorm_output = layernorm_input.apply(&self.post_attention_layernorm)?; let mlp_output = layernorm_output.apply(&self.mlp)?; let residual = if self.apply_residual_connection_post_layernorm { &layernorm_output } else { &layernorm_input }; mlp_output + residual } } #[derive(Debug, Clone)] struct Transformer { layers: Vec<Block>, final_layernorm: Option<candle_nn::LayerNorm>, rotary_emb: RotaryEmbedding, } impl Transformer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(cfg.num_layers); for layer_index in 0..cfg.num_layers { let block = Block::new(layer_index + 1, cfg, vb_l.pp(layer_index))?; layers.push(block) } let final_layernorm = if cfg.post_layer_norm { let ln = if cfg.rmsnorm { candle_nn::rms_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? .into_inner() } else { candle_nn::layer_norm( cfg.hidden_size, cfg.layernorm_epsilon, vb.pp("final_layernorm"), )? }; Some(ln) } else { None }; let rotary_emb = RotaryEmbedding::new(cfg, vb.dtype(), vb.device())?; Ok(Self { layers, final_layernorm, rotary_emb, }) } fn reset_kv_cache(&mut self) { for block in self.layers.iter_mut() { block.reset_kv_cache() } } fn forward(&mut self, xs: &Tensor, attention_mask: &Option<Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for block in self.layers.iter_mut() { xs = block.forward(&xs, attention_mask, &self.rotary_emb)? 
} match self.final_layernorm.as_ref() { None => Ok(xs), Some(ln) => xs.apply(ln), } } } #[derive(Debug, Clone)] struct Embedding { word_embeddings: candle_nn::Embedding, fp32_residual_connection: bool, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let word_embeddings = candle_nn::embedding( cfg.padded_vocab_size, cfg.hidden_size, vb.pp("word_embeddings"), )?; Ok(Self { word_embeddings, fp32_residual_connection: cfg.fp32_residual_connection, }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.word_embeddings.forward(xs)?.transpose(0, 1)?; // b,s,h -> s,b,h if self.fp32_residual_connection { xs.to_dtype(candle::DType::F32) } else { xs.contiguous() } } } #[derive(Debug, Clone)] pub struct Model { embedding: Embedding, encoder: Transformer, output_layer: Linear, } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embedding"))?; let encoder = Transformer::new(cfg, vb.pp("encoder"))?; let output_layer = linear( cfg.hidden_size, cfg.padded_vocab_size, false, vb.pp("output_layer"), )?; Ok(Self { embedding, encoder, output_layer, }) } pub fn reset_kv_cache(&mut self) { self.encoder.reset_kv_cache() } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = xs.dims2()?; let input_embeds = xs.apply(&self.embedding)?; let attention_mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.device())?) }; let xs = self.encoder.forward(&input_embeds, &attention_mask)?; let lm_logits = xs.i(seq_len - 1)?.apply(&self.output_layer)?; Ok(lm_logits) } }
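// ---------------------------------------------------------------------------
// Illustrative decoding sketch (not part of the upstream file): greedy token
// generation driven by the incremental KV cache above. Tokenization is out of
// scope, so the caller is assumed to provide already-encoded prompt ids and
// the EOS id from the matching ChatGLM tokenizer.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn example_greedy_generate(
    model: &mut Model,
    prompt_ids: &[u32],
    eos_token_id: u32,
    max_new_tokens: usize,
    device: &Device,
) -> Result<Vec<u32>> {
    model.reset_kv_cache();
    let mut tokens = prompt_ids.to_vec();
    let mut generated = Vec::new();
    for step in 0..max_new_tokens {
        // On the first step the whole prompt is fed; afterwards only the last
        // token, since earlier keys/values are kept in the per-layer KV cache.
        let ctx: &[u32] = if step == 0 {
            tokens.as_slice()
        } else {
            &tokens[tokens.len() - 1..]
        };
        let input = Tensor::new(ctx, device)?.unsqueeze(0)?;
        // `Model::forward` returns (batch, vocab_size) logits for the last position.
        let logits = model.forward(&input)?;
        let next = logits.i(0)?.argmax(D::Minus1)?.to_scalar::<u32>()?;
        if next == eos_token_id {
            break;
        }
        tokens.push(next);
        generated.push(next);
    }
    Ok(generated)
}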
8
0
hf_public_repos/candle/candle-transformers/src
hf_public_repos/candle/candle-transformers/src/models/quantized_t5.rs
//! T5 model implementation with quantization support. //! //! T5 is an encoder-decoder model pre-trained on a multi-task mixture of supervised //! and unsupervised tasks. This implementation provides quantization for reduced //! memory and compute requirements. //! //! Key characteristics: //! - Encoder-decoder architecture //! - Layer normalization //! - Relative positional encodings //! - Support for 8-bit quantization //! //! References: //! - 📝 [T5 Paper](https://arxiv.org/abs/1910.10683) //! - 🤗 [Model Card](https://huggingface.co/t5-base) //! - 🤗 Original model from [T5](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py) use crate::models::t5::{deserialize_feed_forward_proj_activation, ActivationWithOptionalGating}; use crate::models::with_tracing::QMatMul; use crate::quantized_nn::Embedding; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::Activation; use serde::Deserialize; use std::sync::Arc; fn default_relative_attention_max_distance() -> usize { 128 } fn default_is_decoder() -> bool { false } fn default_use_cache() -> bool { true } fn default_tie_word_embeddings() -> bool { true } fn get_mask(size: usize, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| u8::from(j > i))) .collect(); Tensor::from_slice(&mask, (size, size), device) } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { vocab_size: usize, d_model: usize, d_kv: usize, d_ff: usize, num_layers: usize, num_decoder_layers: Option<usize>, num_heads: usize, relative_attention_num_buckets: usize, #[serde(default = "default_relative_attention_max_distance")] relative_attention_max_distance: usize, dropout_rate: f64, layer_norm_epsilon: f64, initializer_factor: f64, #[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")] pub feed_forward_proj: ActivationWithOptionalGating, #[serde(default = "default_tie_word_embeddings")] tie_word_embeddings: bool, #[serde(default = "default_is_decoder")] is_decoder: bool, is_encoder_decoder: bool, #[serde(default = "default_use_cache")] pub use_cache: bool, pub pad_token_id: usize, pub eos_token_id: usize, pub decoder_start_token_id: Option<usize>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 32128, d_model: 512, d_kv: 64, d_ff: 2048, num_layers: 6, num_decoder_layers: None, num_heads: 8, relative_attention_num_buckets: 32, relative_attention_max_distance: 128, dropout_rate: 0.1, layer_norm_epsilon: 1e-6, initializer_factor: 1.0, feed_forward_proj: ActivationWithOptionalGating { gated: false, activation: Activation::Relu, }, tie_word_embeddings: true, is_decoder: false, is_encoder_decoder: true, use_cache: true, pad_token_id: 0, eos_token_id: 1, decoder_start_token_id: Some(0), } } } #[derive(Debug, Clone)] struct T5LayerNorm { weight: Tensor, variance_epsilon: f64, span: tracing::Span, } impl T5LayerNorm { fn load(h: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(h, "weight")?.dequantize(vb.device())?; Ok(Self { weight, variance_epsilon: eps, span: tracing::span!(tracing::Level::TRACE, "layer-norm"), }) } } impl Module for T5LayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let 
_enter = self.span.enter(); let dtype = xs.dtype(); let xs_f32 = xs.to_dtype(DType::F32)?; // variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?; let xs = xs.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?; let xs = xs.to_dtype(dtype)?; let xs = xs.broadcast_mul(&self.weight)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseActDense { wi: QMatMul, wo: QMatMul, act: Activation, span: tracing::Span, } impl T5DenseActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi"))?; let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi, wo, act: Activation::Relu, span: tracing::span!(tracing::Level::TRACE, "dense-act-dense"), }) } } impl Module for T5DenseActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.wi.forward(xs)?; let xs = self.act.forward(&xs)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5DenseGatedActDense { wi_0: QMatMul, wi_1: QMatMul, wo: QMatMul, act: Activation, span: tracing::Span, } impl T5DenseGatedActDense { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let wi_0 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?; let wi_1 = QMatMul::new(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?; let wo = QMatMul::new(cfg.d_ff, cfg.d_model, vb.pp("wo"))?; Ok(Self { wi_0, wi_1, wo, act: cfg.feed_forward_proj.activation, span: tracing::span!(tracing::Level::TRACE, "dense-gated-act-dense"), }) } } impl Module for T5DenseGatedActDense { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?; let hidden_linear = self.wi_1.forward(xs)?; let xs = hidden_gelu.broadcast_mul(&hidden_linear)?; let xs = self.wo.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5LayerFF { dense_act: Option<T5DenseActDense>, gated_dense_act: Option<T5DenseGatedActDense>, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerFF { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated { ( None, Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?), ) } else { ( Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?), None, ) }; Ok(Self { dense_act, gated_dense_act, layer_norm, span: tracing::span!(tracing::Level::TRACE, "layer-ff"), }) } } impl Module for T5LayerFF { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let ys = self.layer_norm.forward(xs)?; let ys = match &self.dense_act { Some(dense_act) => dense_act.forward(&ys)?, None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?, }; let xs = (xs + ys)?; Ok(xs) } } #[derive(Debug, Clone)] struct T5Attention { q: QMatMul, k: QMatMul, v: QMatMul, o: QMatMul, n_heads: usize, d_kv: usize, relative_attention_bias: Option<Embedding>, relative_attention_num_buckets: usize, relative_attention_max_distance: usize, inner_dim: usize, use_cache: bool, kv_cache: Option<(Tensor, Tensor)>, span: tracing::Span, span_cache: tracing::Span, span_mm: tracing::Span, span_sm: tracing::Span, } impl T5Attention { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let inner_dim = cfg.num_heads * cfg.d_kv; let q = QMatMul::new(cfg.d_model, inner_dim, vb.pp("q"))?; 
let k = QMatMul::new(cfg.d_model, inner_dim, vb.pp("k"))?; let v = QMatMul::new(cfg.d_model, inner_dim, vb.pp("v"))?; let o = QMatMul::new(inner_dim, cfg.d_model, vb.pp("o"))?; let relative_attention_bias = if has_relative_attention_bias { let emb = Embedding::new( cfg.relative_attention_num_buckets, cfg.num_heads, vb.pp("relative_attention_bias"), )?; Some(emb) } else { None }; Ok(Self { q, k, v, o, n_heads: cfg.num_heads, d_kv: cfg.d_kv, relative_attention_bias, relative_attention_num_buckets: cfg.relative_attention_num_buckets, relative_attention_max_distance: cfg.relative_attention_max_distance, inner_dim, use_cache: cfg.use_cache && decoder, kv_cache: None, span: tracing::span!(tracing::Level::TRACE, "attention"), span_cache: tracing::span!(tracing::Level::TRACE, "attention-cache"), span_mm: tracing::span!(tracing::Level::TRACE, "attention-mm"), span_sm: tracing::span!(tracing::Level::TRACE, "attention-sm"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, key_value_states: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { // Performs Self-attention (if key_value_states is None) or attention // over source sentence (provided by key_value_states). let _enter = self.span.enter(); let kv_input = match key_value_states { None => xs, Some(key_value_states) => key_value_states, }; let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?); let kv_len = kv_input.dim(1)?; let q = self.q.forward(xs)?; let k = self.k.forward(kv_input)?; let v = self.v.forward(kv_input)?; let q = q .reshape((b_sz, q_len, self.n_heads, self.d_kv))? .transpose(1, 2)? .contiguous()?; let mut k = k .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; let mut v = v .reshape((b_sz, kv_len, self.n_heads, self.d_kv))? .transpose(1, 2)?; if self.use_cache && key_value_states.is_none() { let _enter = self.span_cache.enter(); if let Some((kv_cache_k, kv_cache_v)) = &self.kv_cache { k = Tensor::cat(&[kv_cache_k, &k], 2)?; v = Tensor::cat(&[kv_cache_v, &v], 2)?; }; self.kv_cache = Some((k.clone(), v.clone())); }; let k = k.contiguous()?; let v = v.contiguous()?; // TODO: Use flash_attn. let scores = { let _enter = self.span_mm.enter(); q.matmul(&k.t()?)? }; let scores = match mask { None => scores, Some(mask) => masked_fill( &scores, &mask .unsqueeze(0)? .unsqueeze(0)? .repeat((b_sz, self.n_heads))?, f32::NEG_INFINITY, )?, }; let (scores, position_bias) = match position_bias { Some(position_bias) => ( scores.broadcast_add(position_bias)?, Some(position_bias.clone()), ), None => match &self.relative_attention_bias { None => (scores, None), Some(relative_attention_bias) => { // This only handles the bidirectional case. 
let kv_len = k.dim(2)?; let (q_start, q_end) = match self.use_cache { true => ((kv_len - q_len) as u32, kv_len as u32), false => (0_u32, kv_len as u32), }; let num_buckets = self.relative_attention_num_buckets as u32 / 2; let max_exact = num_buckets / 2; let relative_position = (q_start..q_end) .map(|i| { (0..kv_len as u32) .map(|j| { if i < j { if j - i < max_exact { j - i + num_buckets } else { let b = f32::log( (j - i) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; u32::min( max_exact + num_buckets + b as u32, self.relative_attention_num_buckets as u32 - 1, ) } } else if i - j < max_exact { i - j } else { let b = f32::log( (i - j) as f32 / max_exact as f32, self.relative_attention_max_distance as f32 / max_exact as f32, ) * (num_buckets - max_exact) as f32; max_exact + b as u32 } }) .collect::<Vec<u32>>() }) .collect::<Vec<Vec<_>>>(); let relative_buckets = Tensor::new(relative_position, q.device())?; let position_bias = relative_attention_bias .forward(&relative_buckets)? .permute((2, 0, 1))? .unsqueeze(0)?; (scores.broadcast_add(&position_bias)?, Some(position_bias)) // TODO: position_bias_masked? } }, }; let attn_weights = { let _enter = self.span_sm.enter(); candle_nn::ops::softmax_last_dim(&scores)? }; let attn_output = attn_weights.matmul(&v)?; let attn_output = attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.inner_dim))?; let attn_output = self.o.forward(&attn_output)?; Ok((attn_output, position_bias)) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct T5LayerSelfAttention { self_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerSelfAttention { fn load(h: bool, d: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { self_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "self-attn"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, mask: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_xs = self.layer_norm.forward(xs)?; let (ys, position_bias) = self.self_attention .forward(&normed_xs, position_bias, None, mask)?; let ys = (xs + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attention.clear_kv_cache() } } #[derive(Debug, Clone)] struct T5LayerCrossAttention { cross_attention: T5Attention, layer_norm: T5LayerNorm, span: tracing::Span, } impl T5LayerCrossAttention { fn load(decoder: bool, vb: VarBuilder, cfg: &Config) -> Result<Self> { let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?; let layer_norm = T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?; Ok(Self { cross_attention, layer_norm, span: tracing::span!(tracing::Level::TRACE, "cross-attn"), }) } fn forward( &mut self, hidden_states: &Tensor, position_bias: Option<&Tensor>, key_value_states: &Tensor, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); let normed_hidden_states = self.layer_norm.forward(hidden_states)?; let (ys, position_bias) = self.cross_attention.forward( &normed_hidden_states, position_bias, Some(key_value_states), None, )?; let ys = (hidden_states + ys)?; Ok((ys, position_bias)) } fn clear_kv_cache(&mut self) { self.cross_attention.clear_kv_cache() } } 
#[derive(Debug, Clone)] struct T5Block { self_attn: T5LayerSelfAttention, cross_attn: Option<T5LayerCrossAttention>, ff: T5LayerFF, span: tracing::Span, } impl T5Block { fn load( has_relative_attention_bias: bool, decoder: bool, vb: VarBuilder, cfg: &Config, ) -> Result<Self> { let vb = vb.pp("layer"); let self_attn = T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?; let cross_attn = if cfg.is_decoder { Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?) } else { None }; let ff_i = if cross_attn.is_some() { 2 } else { 1 }; let ff = T5LayerFF::load(vb.pp(ff_i), cfg)?; Ok(Self { self_attn, cross_attn, ff, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward( &mut self, xs: &Tensor, position_bias: Option<&Tensor>, encoder_hidden_states: Option<&Tensor>, ) -> Result<(Tensor, Option<Tensor>)> { let _enter = self.span.enter(); // TODO: Cache masks let mask = match self.cross_attn.is_some() { true => { let mask_len = xs.dim(1)?; // If the input seq length is 1, no need for a mask, this is also helpful to avoid shape // issues when using the KV cache in the decoder. if mask_len <= 1 { None } else { Some(get_mask(mask_len, xs.device())?) } } false => None, }; let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?; // TODO: clamp for f16? if let Some(cross_attn) = &mut self.cross_attn { (xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?; // TODO: clamp for f16? } let xs = self.ff.forward(&xs)?; // TODO: clamp for f16? Ok((xs, position_bias)) } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache(); self.cross_attn.iter_mut().for_each(|c| c.clear_kv_cache()); } } #[derive(Debug, Clone)] struct T5Stack { block: Vec<T5Block>, shared: Arc<Embedding>, final_layer_norm: T5LayerNorm, span: tracing::Span, } impl T5Stack { fn load(decoder: bool, vb: VarBuilder, shared: &Arc<Embedding>, cfg: &Config) -> Result<Self> { let block = (0..cfg.num_layers) .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let final_layer_norm = T5LayerNorm::load( cfg.d_model, cfg.layer_norm_epsilon, vb.pp("final_layer_norm"), )?; Ok(Self { block, shared: shared.clone(), final_layer_norm, span: tracing::span!(tracing::Level::TRACE, "stack"), }) } fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let input_embeds = self.shared.as_ref().forward(input_ids)?; let mut hidden_states = input_embeds; let mut position_bias = None; for block in self.block.iter_mut() { (hidden_states, position_bias) = block.forward( &hidden_states, position_bias.as_ref(), encoder_hidden_states, )? 
} self.final_layer_norm.forward(&hidden_states) } fn clear_kv_cache(&mut self) { self.block.iter_mut().for_each(|b| b.clear_kv_cache()) } } #[derive(Debug, Clone)] pub struct T5EncoderModel { encoder: T5Stack, device: Device, span: tracing::Span, } impl T5EncoderModel { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let shared_vb = if vb.contains_key("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg)?; Ok(Self { encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "encoder"), }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.encoder.forward(input_ids, None) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct T5ForConditionalGeneration { encoder: T5Stack, decoder: T5Stack, d_model: usize, tie_word_embeddings: bool, lm_head: Option<QMatMul>, shared: Arc<Embedding>, device: Device, span_decode: tracing::Span, span_decode_head: tracing::Span, } impl T5ForConditionalGeneration { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { assert!(cfg.is_encoder_decoder); let d_model = cfg.d_model; let shared_vb = if vb.contains_key("shared.weight") { vb.pp("shared") } else { vb.pp("decoder").pp("embed_tokens") }; let shared = Embedding::new(cfg.vocab_size, cfg.d_model, shared_vb)?; let shared = Arc::new(shared); let mut encoder_cfg = cfg.clone(); encoder_cfg.is_decoder = false; encoder_cfg.use_cache = false; encoder_cfg.is_encoder_decoder = false; let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, &encoder_cfg)?; let mut decoder_cfg = cfg.clone(); decoder_cfg.is_decoder = true; decoder_cfg.is_encoder_decoder = false; decoder_cfg.num_layers = cfg.num_decoder_layers.unwrap_or(cfg.num_layers); let decoder = T5Stack::load(true, vb.pp("decoder"), &shared, &decoder_cfg)?; let tie_word_embeddings = cfg.tie_word_embeddings; let lm_head = if tie_word_embeddings { None } else { Some(QMatMul::new(cfg.d_model, cfg.vocab_size, vb.pp("lm_head"))?) }; Ok(Self { encoder, decoder, d_model, tie_word_embeddings, lm_head, shared, device: vb.device().clone(), span_decode: tracing::span!(tracing::Level::TRACE, "decode"), span_decode_head: tracing::span!(tracing::Level::TRACE, "decode-head"), }) } pub fn encode(&mut self, input_ids: &Tensor) -> Result<Tensor> { self.encoder.forward(input_ids, None) } pub fn decode( &mut self, decoder_input_ids: &Tensor, encoder_output: &Tensor, ) -> Result<Tensor> { let _enter = self.span_decode.enter(); let decoder_output = self .decoder .forward(decoder_input_ids, Some(encoder_output))?; let scaling_factor = if self.tie_word_embeddings { // Rescale output before projecting on vocab // See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 (self.d_model as f64).sqrt() } else { 1.0 }; let sequence_output = ((decoder_output .narrow(1, decoder_output.dim(1)? - 1, 1)? .squeeze(1)?) 
* scaling_factor)?; let output = { let _enter = self.span_decode_head.enter(); match self.lm_head { None => sequence_output.matmul(&self.shared.embeddings().t()?)?, Some(ref lm_head) => lm_head.forward(&sequence_output)?, } }; Ok(output) } pub fn forward(&mut self, input_ids: &Tensor, decoder_input_ids: &Tensor) -> Result<Tensor> { let encoder_output = self.encode(input_ids)?; self.decode(decoder_input_ids, &encoder_output) } pub fn device(&self) -> &Device { &self.device } pub fn clear_kv_cache(&mut self) { self.encoder.clear_kv_cache(); self.decoder.clear_kv_cache(); } }
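// ---------------------------------------------------------------------------
// Illustrative generation sketch (not part of the upstream file): encode the
// prompt once, then greedily decode while reusing the encoder output and the
// decoder KV cache. The GGUF path and the already-tokenized `input_ids` are
// assumptions of this sketch; a real caller would produce them with the
// matching T5 tokenizer and checkpoint. `from_gguf` follows the current
// quantized VarBuilder API; older candle releases may differ slightly.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn example_greedy_decode(
    weights: &std::path::Path,
    cfg: &Config,
    input_ids: &Tensor,
    max_new_tokens: usize,
) -> Result<Vec<u32>> {
    let device = input_ids.device().clone();
    let vb = VarBuilder::from_gguf(weights, &device)?;
    let mut model = T5ForConditionalGeneration::load(vb, cfg)?;
    model.clear_kv_cache();
    // The encoder runs once; its output is reused at every decoding step.
    let encoder_output = model.encode(input_ids)?;
    let mut decoder_tokens = vec![cfg.decoder_start_token_id.unwrap_or(cfg.pad_token_id) as u32];
    let mut generated = Vec::new();
    for step in 0..max_new_tokens {
        // With the decoder KV cache enabled only the last token is fed after
        // the first step; earlier keys/values are already cached.
        let ctx: &[u32] = if step == 0 || !cfg.use_cache {
            decoder_tokens.as_slice()
        } else {
            &decoder_tokens[decoder_tokens.len() - 1..]
        };
        let decoder_input = Tensor::new(ctx, &device)?.unsqueeze(0)?;
        // `decode` returns (batch, vocab_size) logits for the last position.
        let logits = model.decode(&decoder_input, &encoder_output)?;
        let next = logits.argmax(D::Minus1)?.to_vec1::<u32>()?[0];
        if next as usize == cfg.eos_token_id {
            break;
        }
        decoder_tokens.push(next);
        generated.push(next);
    }
    Ok(generated)
}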
9
0
hf_public_repos
hf_public_repos/blog/open-llm-leaderboard-drop.md
--- title: "Open LLM Leaderboard: DROP deep dive" thumbnail: /blog/assets/evaluating-mmlu-leaderboard/thumbnail.png authors: - user: clefourrier - user: cabreraalex guest: true - user: stellaathena guest: true - user: SaylorTwift - user: thomwolf --- # Open LLM Leaderboard: DROP deep dive Recently, [three new benchmarks](https://twitter.com/clefourrier/status/1722555555338956840) were added to the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard): Winogrande, GSM8k and DROP, using the original implementations reproduced in the [EleutherAI Harness](https://github.com/EleutherAI/lm-evaluation-harness/). A cursory look at the scores for DROP revealed something strange was going on, with the overwhelming majority of models scoring less than 10 out of 100 on their f1-score! We did a deep dive to understand what was going on, come with us to see what we found out! ## Initial observations DROP (Discrete Reasoning Over Paragraphs) is an evaluation where models must extract relevant information from English-text paragraphs before executing discrete reasoning steps on them (for example, sorting or counting items to arrive at the correct answer, see the table below for examples). The metrics used are custom f1 and exact match scores. <div align="center"> <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-llm-leaderboard/drop/drop_example.png" width="500" /> <figcaption>Examples of reasoning and paragraph from the original article.</figcaption> </figure> </div> We added it to the Open LLM Leaderboard three weeks ago, and observed that the f1-scores of pretrained models followed an unexpected trend: when we plotted DROP scores against the leaderboard original average (of ARC, HellaSwag, TruthfulQA and MMLU), which is a reasonable proxy for overall model performance, we expected DROP scores to be correlated with it (with better models having better performance). However, this was only the case for a small number of models, and all the others had a very low DROP f1-score, below 10. <div align="center"> <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-llm-leaderboard/drop/drop_bimodal.png" width="500" /> <figcaption>Two trends can be observed in the DROP scores: some follow the average (in diagonal), others are stuck around 5 (vertical line on the right of the graph).</figcaption> </figure> </div> ## Normalization interrogations During our first deeper dive in these surprising behavior, we observed that the normalization step was possibly not working as intended: in some cases, this normalization ignored the correct numerical answers when they were directly followed by a whitespace character other than a space (a line return, for example). Let's look at an example, with the generation being `10\n\nPassage: The 2011 census recorded a population of 1,001,360`, and the gold answer being `10`. Normalization happens in several steps, both for generation and gold: 1) **Split on separators** `|`, `-`, or ` ` The beginning sequence of the generation `10\n\nPassage:` contain no such separator, and is therefore considered a single entity after this step. 
2) **Punctuation removal** The first token then becomes `10\n\nPassage` (`:` is removed) 3) **Homogenization of numbers** Every string that can be cast to float is considered a number and cast to float, then re-converted to string. `10\n\nPassage` stays the same, as it cannot be cast to float, whereas the gold `10` becomes `10.0`. 4) **Other steps** A lot of other normalization steps ensue (removing articles, removing other whitespaces, etc.) and our original example becomes `10 passage 2011.0 census recorded population of 1001360.0`. However, the overall score is not computed on the string, but on the bag of words (BOW) extracted from the string, here `{'recorded', 'population', 'passage', 'census', '2011.0', '1001360.0', '10'}`, which is compared with the BOW of the gold, also normalized in the above manner, `{10.0}`. As you can see, they don’t intersect, even though the model predicted the correct output! In summary, if a number is followed by any kind of whitespace other than a simple space, it will not pass through the number normalization, hence never match the gold if it is also a number! This first issue was likely to mess up the scores quite a bit, but clearly it was not the only factor causing DROP scores to be so low. We decided to investigate a bit more. ## Diving into the results Extending our investigations, our friends at [Zeno](https://zenoml.com) joined us and [undertook a much more thorough exploration](https://hub.zenoml.com/report/1255/DROP%20Benchmark%20Exploration) of the results, looking at 5 models which were representative of the problems we noticed in DROP scores: falcon-180B and mistral-7B were underperforming compared to what we were expecting, Yi-34B and tigerbot-70B had a very good performance on DROP correlated with their average scores, and facebook/xglm-7.5B fell in the middle. You can give analyzing the results a try [in the Zeno project here](https://hub.zenoml.com/project/2f5dec90-df5e-4e3e-a4d1-37faf814c5ae/OpenLLM%20Leaderboard%20DROP%20Comparison/explore?params=eyJtb2RlbCI6ImZhY2Vib29rX194Z2xtLTcuNUIiLCJtZXRyaWMiOnsiaWQiOjk1NjUsIm5hbWUiOiJmMSIsInR5cGUiOiJtZWFuIiwiY29sdW1ucyI6WyJmMSJdfSwiY29tcGFyaXNvbk1vZGVsIjoiVGlnZXJSZXNlYXJjaF9fdGlnZXJib3QtNzBiLWNoYXQiLCJjb21wYXJpc29uQ29sdW1uIjp7ImlkIjoiYzJmNTY1Y2EtYjJjZC00MDkwLWIwYzctYTNiNTNkZmViM2RiIiwibmFtZSI6ImVtIiwiY29sdW1uVHlwZSI6IkZFQVRVUkUiLCJkYXRhVHlwZSI6IkNPTlRJTlVPVVMiLCJtb2RlbCI6ImZhY2Vib29rX194Z2xtLTcuNUIifSwiY29tcGFyZVNvcnQiOltudWxsLHRydWVdLCJtZXRyaWNSYW5nZSI6W251bGwsbnVsbF0sInNlbGVjdGlvbnMiOnsic2xpY2VzIjpbXSwibWV0YWRhdGEiOnt9LCJ0YWdzIjpbXX19) if you want to! The Zeno team found two even more concerning features: 1) Not a single model got a correct result on floating point answers 2) High quality models which generate long answers actually have a lower f1-score At this point, we believed that both failure cases were actually caused by the same root factor: using `.` as a stopword token (to end the generations): 1) Floating point answers are systematically interrupted before their generation is complete 2) Higher quality models, which try to match the few-shot prompt format, will generate `Answer\n\nPlausible prompt for the next question.`, and only stop during the plausible prompt continuation after the actual answer on the first `.`, therefore generating too many words and getting a bad f1 score. We hypothesized that both these problems could be fixed by using `\n` instead of `.` as an end of generation stop word. ## Changing the end of generation token So we gave it a try! 
We investigated using `\n` as the end of generation token on the available results. We split the generated answer on the first `\n` it contained, if one was present, and recomputed the scores. *Note that this is only an approximation of the correct result, as it won't fix answers that were cut too early on `.` (for example, floating point answers) - but it also won’t give an unfair advantage to any model, as all of them were affected by this problem. However, it’s the best we could do without rerunning models (as we wanted to keep the community posted as soon as possible).* The results we got were the following: splitting on `\n` correlates really well with other scores and therefore with overall performance. <div align="center"> <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-llm-leaderboard/drop/drop_partial_fix.png" width="500" /> <figcaption>We can see in orange that the scores computed on the new strings correlate much better with the average performance.</figcaption> </figure> </div> ## So what's next? A quick calculation shows that re-running the full evaluation of all models would be quite costly (the full update took 8 years of GPU time, and a lot of it was taken by DROP), so we estimated how much it would cost to only re-run failing examples. In 10% of the cases, the gold answer is a floating point number (for example `12.25`) and model predictions start with the correct beginning (for our example, `12`) but are cut off on a `.` - these predictions likely would have actually been correct if the generation was to continue. We would definitely need to re-run them! Our estimation does not count generated sentences that finish with a number which was possibly interrupted (40% of the other generations), nor any prediction messed up by its normalization. To get correct results, we would thus need to re-run more than 50% of the examples, a huge amount of GPU time! We need to be certain that the implementation we'll run is correct this time. After discussing it with the fantastic EleutherAI team (both on [GitHub](https://github.com/EleutherAI/lm-evaluation-harness/issues/978) and internally), who guided us through the code and helped our investigations, it became very clear that the LM Eval Harness implementation follows the "official DROP" code very strictly: a new version of this benchmark’s evaluation thus needs to be developed! **We have therefore taken the decision to remove DROP from the Open LLM Leaderboard until a new version arises.** One takeaway of this investigation is the value of having the many eyes of the community collaboratively investigate a benchmark in order to detect errors that were previously missed. Here again, the power of open source, community, and developing in the open shines, in that it allows us to transparently investigate the root cause of an issue on a benchmark which has been out there for a couple of years. We hope that interested members of the community will join forces with academics working on DROP evaluation to fix both its scoring and its normalization. We'd love for it to become usable again, as the dataset itself is really quite interesting and cool. We encourage you to provide feedback on how we should evaluate DROP [on this issue](https://github.com/EleutherAI/lm-evaluation-harness/issues/1050). Thanks to the many community members who pointed out issues on DROP scores, and many thanks to the EleutherAI Harness and Zeno teams for their great help on this issue.
0
0
hf_public_repos
hf_public_repos/blog/pref-tuning.md
--- title: 'Preference Tuning LLMs with Direct Preference Optimization Methods' thumbnail: /blog/assets/pref-tuning/thumbnail.jpg authors: - user: kashif - user: edbeeching - user: lewtun - user: lvwerra - user: osanseviero --- # Preference Tuning LLMs with Direct Preference Optimization Methods **Addendum** After consulting with the authors of the [IPO paper](https://arxiv.org/abs/2310.12036), we discovered that the implementation of IPO in TRL was incorrect; in particular, the loss over the log-likelihoods of the completions needs to be _averaged_ instead of _summed_. We have added a fix in [this PR](https://github.com/huggingface/trl/pull/1265) and re-run the experiments. The results are now consistent with the paper, with IPO on par with DPO and performing better than KTO in the paired preference setting. We have updated the post to reflect these new results. **TL;DR** We evaluate three promising methods to align language models without reinforcement learning (or preference tuning) on a number of models and hyperparameter settings. In particular we train using different hyperparameters and evaluate on: * [Direct Preference Optimization](https://huggingface.co/papers/2305.18290) (DPO) * [Identity Preference Optimisation](https://huggingface.co/papers/2310.12036) (IPO) * [Kahneman-Tversky Optimisation](https://github.com/ContextualAI/HALOs) (KTO) ## Introduction In this post, we perform an empirical evaluation of three promising LLM alignment algorithms: Direct Preference Optimization (DPO), Identity Preference Optimisation (IPO) and Kahneman-Tversky Optimisation (KTO). We conducted our experiments on two high quality 7b LLMs that have undergone a supervised fine-tuning step, but no preference alignment. We find that while one algorithm clearly outshines the others, there are key hyper-parameters that must be tuned to achieve the best results. ## Alignment without Reinforcement Learning |![Image from the DPO paper ([https://arxiv.org/abs/2305.18290](https://arxiv.org/pdf/2305.18290.pdf))](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pref_tuning/dpo.png)| |:--:| |Image from the DPO paper ([https://arxiv.org/abs/2305.18290](https://arxiv.org/pdf/2305.18290.pdf))| [Direct Preference Optimization (DPO)](https://huggingface.co/papers/2305.18290) has emerged as a promising alternative for aligning Large Language Models (LLMs) to human or AI preferences. Unlike [traditional alignment methods](https://huggingface.co/blog/rlhf), which are based on reinforcement learning, DPO recasts the alignment formulation as a simple loss function that can be optimised directly on a dataset of preferences \\( \{(x, y_w, y_l)\} \\), where \\(x\\) is a prompt and \\(y_w,y_l\\) are the preferred and dispreferred responses. |![Sample preference dataset](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pref_tuning/data.png)| |:--:| |Sample of a preference tuning dataset.| This makes DPO simple to use in practice and has been applied with success to train models like [Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) and [Intel’s NeuralChat](https://huggingface.co/Intel/neural-chat-7b-v3-3). The success of DPO has prompted researchers to develop new loss functions that generalise the method in two main directions: * **Robustness**: One shortcoming of DPO is that it tends to quickly overfit on the preference dataset. 
To avoid this, researchers at Google DeepMind introduced [Identity Preference Optimisation (IPO)](https://huggingface.co/papers/2310.12036), which adds a regularisation term to the DPO loss and enables one to train models to convergence without requiring tricks like early stopping. * **Dispensing with paired preference data altogether**: Like most alignment methods, DPO requires a dataset of paired preferences \\( \{(x, y_w, y_l)\} \\), where annotators label which response is better according to a set of criteria like helpfulness or harmfulness. In practice, creating these datasets is a time consuming and costly endeavour. ContextualAI recently proposed an interesting alternative called [Kahneman-Tversky Optimisation (KTO)](https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf), which defines the loss function entirely in terms of individual examples that have been labelled as "good" or "bad" (for example, the 👍 or 👎 icons one sees in chat UIs). These labels are much easier to acquire in practice and KTO is a promising way to continually update chat models running in production environments. At the same time, these various methods come with hyperparameters, the most important one being \\( \beta \\), which controls how much to weight the preference of the reference model. With these alternatives now available in the practitioner’s arsenal through libraries like 🤗 [TRL](https://github.com/huggingface/trl), a natural question then becomes which of these methods and hyperparameters produce the best chat model? This post aims to answer this question by performing an empirical analysis of the three methods. We will sweep over key hyperparameters such as \\(\beta\\) and training steps, then evaluate the resulting models’ performance via [MT-Bench](https://huggingface.co/spaces/lmsys/mt-bench), which is a common benchmark to measure chat model capabilities. We provide open-source code to replicate these results in a recent update to the 🤗 [alignment-handbook](https://github.com/huggingface/alignment-handbook). Let’s get started! ## Links Here are the important links associated with our analysis: - Code and config files to perform the hyperparameter scan: [https://github.com/huggingface/alignment-handbook/tree/main/recipes/pref_align_scan](https://github.com/huggingface/alignment-handbook/tree/main/recipes/pref_align_scan) - 📚 The collection of dataset and models we used: [https://huggingface.co/collections/alignment-handbook/dpo-vs-kto-vs-ipo-65a69c5f03548d61dbe29ef8](https://huggingface.co/collections/alignment-handbook/dpo-vs-kto-vs-ipo-65a69c5f03548d61dbe29ef8) ## Experimental Setup There are two main ingredients that one needs to consider when performing alignment experiments: the model we choose to optimize and the alignment dataset. To get more independent data points, we considered two models, [OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B) and [Zephyr-7b-beta-sft](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full), and two alignment datasets Intel’s [orca_dpo_pairs](https://huggingface.co/datasets/Intel/orca_dpo_pairs) and the [ultrafeedback-binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized) dataset. For the first experiment, we used [OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B) as it’s one of the best 7B parameter chat models that hasn’t been subject to any alignment techniques. 
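Both alignment datasets live on the Hugging Face Hub; as a minimal sketch (we print the column names rather than assume them), you can pull them down and inspect them before any preprocessing:

```python
from datasets import load_dataset

# The two alignment datasets used in this post.
orca = load_dataset("Intel/orca_dpo_pairs", split="train")
ultrafeedback = load_dataset("HuggingFaceH4/ultrafeedback_binarized", split="train_prefs")

# Inspect sizes and schemas before preparing (prompt, chosen, rejected) examples.
print(len(orca), orca.column_names)
print(len(ultrafeedback), ultrafeedback.column_names)
```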
We then used Intel’s `orca_dpo_pairs` [dataset](https://huggingface.co/datasets/Intel/orca_dpo_pairs), which consists of 13k prompts where the chosen response is generated by GPT-4, and the undesired response is generated by Llama-Chat 13b. This is the dataset behind NeuralChat and NeuralHermes-2.5-Mistral-7B. Since KTO doesn’t require pairwise preferences per se, we simply treat the GPT-4 responses as “good” labels and the Llama-Chat 13b ones as “bad”. While GPT-4's responses are likely to be preferred over Llama-Chat 13b, there may be some cases where Llama-Chat-13b produces a better response, we consider this to represent a small minority of the examples. The second experiment performed preference alignment on the[Zephyr-7b-beta-sft](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) model with the [ultrafeedback-binarized](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized) dataset, which contains 66k prompts with pairs of chosen and rejected responses. This dataset was used to train the original Zephyr model, which at the time was the best in class 7B model on numerous automated benchmarks and human evaluations. ## Configuring the experiments The alignment handbook provides an easy way to configure a single experiment, these parameters are used to configure the [run_dpo.py](https://github.com/huggingface/alignment-handbook/blob/main/scripts/run_dpo.py) script. ```yaml # Model arguments model_name_or_path: teknium/OpenHermes-2.5-Mistral-7B torch_dtype: null # Data training arguments dataset_mixer: HuggingFaceH4/orca_dpo_pairs: 1.0 dataset_splits: - train_prefs - test_prefs preprocessing_num_workers: 12 # Training arguments with sensible defaults bf16: true beta: 0.01 loss_type: sigmoid do_eval: true do_train: true evaluation_strategy: steps eval_steps: 100 gradient_accumulation_steps: 2 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: False hub_model_id: HuggingFaceH4/openhermes-2.5-mistral-7b-dpo hub_model_revision: v1.0 learning_rate: 5.0e-7 logging_steps: 10 lr_scheduler_type: cosine max_prompt_length: 512 num_train_epochs: 1 optim: adamw_torch output_dir: data/openhermes-2.5-mistral-7b-dpo-v1.0 per_device_train_batch_size: 8 per_device_eval_batch_size: 8 push_to_hub_revision: true save_strategy: "steps" save_steps: 100 save_total_limit: 1 seed: 42 warmup_ratio: 0.1 ``` We created a similar base configuration file for the Zephyr experiments. Chat templates were automatically inferred from the base Chat model, with OpenHermes-2.5 using ChatML format and Zephyr using the H4 chat template. Alternatively, if you want to use your own chat format, the 🤗 tokenizers library has now enabled user-defined chat templates using a jinja format strings: ```bash # Example of the Zephyr chat template "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" ``` Which formats conversations as follows: ```bash # <|system|> # You are a friendly chatbot who always responds in the style of a pirate.</s> # <|user|> # How many helicopters can a human eat in one sitting?</s> # <|assistant|> # Ah, me hearty matey! But yer question be a puzzler! 
A human cannot eat a helicopter in one sitting, as helicopters are not edible. They be made of metal, plastic, and other materials, not food! ``` ## Hyperparameter Sweep We trained the `DPO`, `IPO` and `KTO` methods via the `loss_type` argument [TRL’s](https://github.com/huggingface/trl) `DPOTrainer` with the `beta` going from `0.01`, `0.1`, `0.2`, ..., `0.9`. We included `0.01` as we observed that some alignment algorithms are especially sensitive to this parameter. All experiments were trained for one epoch. All other hyperparameters are kept the same during each run, including the random seed. We then launched our scan on the Hugging Face cluster using the base configurations defined above. #GPURICH ```bash #!/bin/bash # Define an array containing the base configs we wish to fine tune configs=("zephyr" "openhermes") # Define an array of loss types loss_types=("sigmoid" "kto_pair" "ipo") # Define an array of beta values betas=("0.01" "0.1" "0.2" "0.3" "0.4" "0.5" "0.6" "0.7" "0.8" "0.9") # Outer loop for loss types for config in "${configs[@]}"; do for loss_type in "${loss_types[@]}"; do # Inner loop for beta values for beta in "${betas[@]}"; do # Determine the job name and model revision based on loss type job_name="$config_${loss_type}_beta_${beta}" model_revision="${loss_type}-${beta}" # Submit the job sbatch --job-name=${job_name} recipes/launch.slurm dpo pref_align_scan config_$config deepspeed_zero3 \\ "--beta=${beta} --loss_type=${loss_type} --output_dir=data/$config-7b-align-scan-${loss_type}-beta-${beta} --hub_model_revision=${model_revision}" done done done ``` ## Results We evaluated all models using MT Bench, a multi-turn benchmark that uses GPT-4 to judge models’ performance in eight different categories: Writing, Roleplay, Reasoning, Math, Coding, Extraction, STEM, and Humanities. Although imperfect, MT Bench is a good way to evaluate conversational LLMs. ### Zephyr-7b-beta-SFT | ![Zephyr comparison](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pref_tuning/Zephyr-comp.png) | |:--:| | MT-Bench scores for the Zephyr model for different \\( \beta \\).| For the Zephyr model, we observed that the best performance was achieved with the lowest \\( \beta\\) value, 0.01. This is consistent across all three of the algorithms tested, an interesting follow on experiment for the community would be a fine grained scan in the range of 0.0-0.2. While DPO can achieve the highest MT Bench score, we found that KTO (paired) achieves better results in all but one setting. IPO, while having stronger theoretical guarantees, appears to be worse than the base model in all but one setting. | ![Zephyr scan](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pref_tuning/zephyr_scan.png) | |:--:| | Break down of the best Zephyr models for each algorithm across MT Bench categories. | We can break down the best results for each algorithm across the categories that MT Bench evaluates to identify the strengths and weaknesses of these models. There is still a large area for improvement on the Reasoning, Coding, and Math axes. ### OpenHermes-7b-2.5 While the observations about each algorithm remain the same with OpenHermes, that is that DPO > KTO > IPO, the sweet spot for \\( \beta \\) varies wildly with each algorithm. With the best choice of \\( \beta \\) for DPO, KTO and IPO being 0.6, 0.3 and 0.01 respectively. 
| ![OpenHermes comparison](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pref_tuning/openhermes-comp.png) | |:--:| | MT Bench scores for the OpenHermes model for different \\( \beta \\). | OpenHermes-7b-2.5 is clearly a stronger base model, with a mere 0.3 improvement in MT Bench score after preference alignment. | ![OpenHermes scan](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pref_tuning/openhermes_scan.png) | |:--:| | Break down of the best OpenHermes models for each algorithm across MT Bench categories. | ## Summary & Insights In this post, we have highlighted the importance of choosing the right set of hyperparameters when performing preference alignment. We have empirically demonstrated that DPO and IPO can achieve comparable results, outperforming KTO in a paired preference setting. All code and configuration files replicating these results are now available in the [alignment-handbook](https://github.com/huggingface/alignment-handbook). The best-performing models and datasets can be found in [this collection](https://huggingface.co/collections/alignment-handbook/dpo-vs-kto-vs-ipo-65a69c5f03548d61dbe29ef8). ## What’s next? We will continue our work implementing new preference alignment algorithms in [TRL](https://github.com/huggingface/trl) and evaluating their performance. It seems, at least for the time being, that DPO is the most robust and best performing LLM alignment algorithm. KTO remains an interesting development, as both DPO and IPO require pairs preference data, whereas KTO can be applied to any dataset where responses are rated positively or negatively. We look forward to the new tools and techniques that will be developed in 2024!
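As a closing pointer for readers who want to reproduce a single point of the hyperparameter sweep above, here is a minimal sketch of the underlying TRL call. The argument names (`beta` and `loss_type` passed directly to `DPOTrainer`) reflect the TRL release used at the time of writing and may have moved into a dedicated config object in newer releases; the tiny in-memory dataset is purely illustrative — for the real runs, use the alignment-handbook recipes linked above:

```python
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model_id = "teknium/OpenHermes-2.5-Mistral-7B"
model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# A toy preference dataset with the (prompt, chosen, rejected) columns DPOTrainer expects.
toy_prefs = Dataset.from_dict({
    "prompt": ["What is the capital of France?"],
    "chosen": ["The capital of France is Paris."],
    "rejected": ["France does not have a capital."],
})

trainer = DPOTrainer(
    model,
    ref_model=None,           # a frozen reference copy of `model` is created internally
    beta=0.01,                # the hyperparameter swept in this post
    loss_type="sigmoid",      # "sigmoid" = DPO, "ipo" = IPO, "kto_pair" = KTO
    args=TrainingArguments(output_dir="pref-align-sketch", per_device_train_batch_size=1,
                           num_train_epochs=1, report_to="none"),
    train_dataset=toy_prefs,
    tokenizer=tokenizer,
    max_length=512,
    max_prompt_length=128,
)
trainer.train()
```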
1
0
hf_public_repos
hf_public_repos/blog/habana.md
--- title: "Habana Labs and Hugging Face Partner to Accelerate Transformer Model Training" thumbnail: /blog/assets/60_habana/habana.png authors: - user: susanlansing guest: true --- # Habana Labs and Hugging Face Partner to Accelerate Transformer Model Training *Santa Clara and San Francisco, CA, April 12th, 2022* Powered by deep learning, transformer models deliver state-of-the-art performance on a wide range of machine learning tasks, such as natural language processing, computer vision, speech, and more. However, training them at scale often requires a large amount of computing power, making the whole process unnecessarily long, complex, and costly. Today, [Habana® Labs](https://habana.ai/), a pioneer in high-efficiency, purpose-built deep learning processors, and Hugging Face, the home of [Transformer](https://github.com/huggingface/transformers) models, are happy to announce that they’re joining forces to make it easier and quicker to train high-quality transformer models. Thanks to the integration of Habana’s [SynapseAI software suite](https://habana.ai/training-software/) with the Hugging Face [Optimum open-source library](https://github.com/huggingface/optimum), data scientists and machine learning engineers can now accelerate their Transformer training jobs on Habana processors with just a few lines of code and enjoy greater productivity as well as lower training cost. [Habana Gaudi](https://habana.ai/training/) training solutions, which power Amazon’s EC2 DL1 instances and Supermicro’s X12 Gaudi AI Training Server, deliver price/performance up to 40% lower than comparable training solutions and enable customers to train more while spending less. The integration of ten 100 Gigabit Ethernet ports onto every Gaudi processor enables system scaling from 1 to thousands of Gaudis with ease and cost-efficiency. Habana’s SynapseAI® is optimized—at inception—to enable Gaudi performance and usability, supports TensorFlow and PyTorch frameworks, with a focus on computer vision and natural language processing applications. With 60,000+ stars on Github, 30,000+ models, and millions of monthly visits, Hugging Face is one of the fastest-growing projects in open source software history, and the go-to place for the machine learning community. With its [Hardware Partner Program](https://huggingface.co/hardware), Hugging Face provides Gaudi’s advanced deep learning hardware with the ultimate Transformer toolset. This partnership will enable rapid expansion of the Habana Gaudi training transformer model library, bringing Gaudi efficiency and ease of use to a wide array of customer use cases like natural language processing, computer vision, speech, and more. “*We’re excited to partner with Hugging Face and its many open-source developers to address the growing demand for transformer models that benefit from the efficiency, usability, and scalability of the Gaudi training platform*”, said Sree Ganesan, head of software product management, Habana Labs. “Habana Gaudi brings a new level of efficiency to deep learning model training, and we’re super excited to make this performance easily accessible to Transformer users with minimal code changes through Optimum”, said Jeff Boudier, product director at Hugging Face. To learn how to get started training with Habana Gaudi, please visit [https://developer.habana.ai](https://developer.habana.ai). For more info on the Hugging Face and Habana Gaudi collaboration, please visit [https://huggingface.co/Habana](https://huggingface.co/Habana).
2
0
hf_public_repos
hf_public_repos/blog/zero-shot-vqa-docmatix.md
--- title: "LAVE: Zero-shot VQA Evaluation on Docmatix with LLMs - Do We Still Need Fine-Tuning?" thumbnail: /blog/assets/184_zero_shot_docmatix/thumb.001.jpeg authors: - user: danaaubakirova - user: andito --- # LAVE: Zero-shot VQA Evaluation on Docmatix with LLMs - Do We Still Need Fine-Tuning? While developing Docmatix, we noticed that fine-tuning Florence-2 on it yielded great performance on DocVQA, but resulted in low scores on the benchmark. To enhance performance, we had to fine-tune the model further on DocVQA to learn the syntax required for the benchmark. Interestingly, this additional fine-tuning seemed to perform worse according to human evaluators, which is why we primarily used it for ablation studies and released the model only trained on Docmatix for broader use. Although the generated answers semantically align with the reference answers, as illustrated in Figure 1, they still receive low scores. This raises these questions: Should we fine-tune the models to improve these metrics, or should we develop new metrics that better align with human perception? <div align="center"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/RaQZkkcnTAcS80pPyt55J.png" alt="VQA Evaluation" style="width: 55%; border: none;"> </div> <p align="center"> <em> Figure 1: t-SNE visualization of Zero-Shot Generated and Reference Answers from Docmatix dataset </em> </p> ## Introduction Our community has recently focused on out-of-distribution (OOD) evaluation, utilizing methods like zero-shot transfer to unseen VQA tasks or fine-tuning on one VQA dataset and evaluating on another. This shift is increasingly relevant with the rise of synthetic datasets such as Docmatix, SciGraphQA, SimVQA used to fine-tune Vision Language Models (VLMs). Traditionally, VQA Accuracy has been the main metric for evaluating model performance. It relies on exact string matching between a model's predicted answer and a set of reference answers annotated by humans. This metric worked well because VQA evaluation followed an independent and identically distributed (IID) paradigm, where training and testing data distributions were similar, allowing models to adapt effectively [See details here](https://arxiv.org/pdf/2205.12191). In OOD settings, generated answers might not match reference answers despite being correct due to differences in format, specificity, or interpretation. This paradigm is perfectly illustrated in the Figure 1, where we compare the zero-shot generated captions vs the reference captions from the synthetic dataset. This is particularly true for instruction-generated datasets and their human-curated counterparts. Some [methods](https://proceedings.mlr.press/v202/li23q.html) have attempted to align answer formats with references, but this only addresses the symptom, not the root cause of flawed evaluation metrics. While human evaluation is reliable, it is costly and not scalable, highlighting the need for metrics that better align with human judgment. ## Method [Docmatix](https://huggingface.co/blog/docmatix) is the largest synthetic DocVQA dataset, generated from the curated document dataset, [PDFA](https://huggingface.co/datasets/pixparse/pdfa-eng-wds). It is 100x larger than previously available datasets. The human-curated counterpart is DocVQA, which serves as an evaluation benchmark for VQA models for Document Understanding. 
In this post, we are going to use **the subset of Docmatix** which consists of around 200 test samples, which can be downloaded here: [Docmatix-zero-shot-exp](https://huggingface.co/datasets/HuggingFaceM4/Docmatix/viewer/zero-shot-exp). <div style="display: flex; justify-content: center; align-items: center; gap: 0px; width: 100%; margin: 0 auto;"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/feXi3iSLo8hBXTh2y8NnR.png" alt="Image 1" style="width: 45%; height: auto; object-fit: cover;"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/2X4KdrTi6M8VYU6hOdmk1.png" alt="Image 2" style="width: 45%; height: auto; object-fit: cover;"> </div> <p align="center"> <em> Figure 2: Examples of Q&A pairs from the Docmatix and DocVQA test sets. Note: the corresponding images are not shown here. </em> </p> Although the content of the question and answer pairs in Docmatix and DocVQA is similar, their styles differ significantly. Traditional metrics like CIDER, ANLS, and BLEU can be overly restrictive for zero-shot evaluation in this context. Motivated by the similarity of the embeddings observed in t-SNE (Figure 1), we decided to use a different evaluation metric. In this post, we consider the LAVE (LLM-Assisted VQA Evaluation) metric to better assess generalization on this unseen but semantically similar dataset. <div style="display: flex; justify-content: center; align-items: center; gap: 10px; width: 100%; margin: 0 auto;"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/C4twDu9D6cw0XHdA57Spe.png" alt="Image 1" style="width: 30%; height: auto; object-fit: cover;"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/pYsiOyToOXzRitmRidejW.png" alt="Image 2" style="width: 30%; height: auto; object-fit: cover;"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/uM6IPAAvjyiYTPJXdB10w.png" alt="Image 3" style="width: 30%; height: auto; object-fit: cover;"> </div> <p align="center"> <em> Figure 3: t-SNE visualization of Question, Answer and Image features from the Docmatix and DocVQA datasets </em> </p> For our evaluation, we chose [MPLUGDocOwl1.5](https://arxiv.org/pdf/2403.12895) as a baseline model. This model achieves an 84% ANLS score on the test subset of the original DocVQA dataset. We then ran a zero-shot generation on a subset of Docmatix, consisting of 200 images. We used [Llama-2-Chat-7b](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) for rating the answers. ## About LAVE We followed the procedure outlined in the [paper](https://arxiv.org/html/2310.02567v2).
The VQA evaluation is framed as an answer-rating task suitable for in-context learning with LLMs. We used a rating scale from 1 to 3 to account for ambiguous questions or incomplete answers. The prompt included a task description, several demonstrations of input/output, and the input for a test example. We structured our task description and included the instruction **"Give the rationale before rating"** to showcase a justification for the assigned rating. Each demonstration comprised a question, a set of reference answers, the candidate answer, the answer rating, and an explanation for the rating. We also include the **"Provide only one rating"** to avoid sentence-by-sentence analysis, which sometimes resulted in several ratings. ```py task_description = """You are given a question, a set of gold-standard reference answers written by experts, and a candidate answer. Please rate the accuracy of the candidate answer for the question considering the reference answers. Use a scale of 1-3, with 1 indicating an incorrect or irrelevant answer, 2 indicating an ambiguous or incomplete answer, and 3 indicating a correct answer. Give the rationale before rating. Provide only one rating. THIS IS VERY IMPORTANT: A binary question should only be answered with 'yes' or 'no', otherwise the candidate answer is incorrect.""" demonstrations = [ { "question": "What's the weather like?", "reference_answer": ["sunny", "clear", "bright", "sunny", "sunny"], "generated_answer": "cloudy" } ] ``` #### Scoring Function Given the LLM’s generated text for the test example, we extracted the rating from the last character (either 1, 2, or 3) and mapped it to a score in the range [0, 1]: \[ s = \frac{r - 1}{2} \] #### Table of Results The results of our evaluation are summarized in the table below: <table style="border-collapse: collapse; width: 50%; margin: auto;"> <tr> <th style="border: 1px solid black; padding: 8px; text-align: center;">Metric</th> <th style="border: 1px solid black; padding: 8px; text-align: center;">CIDER</th> <th style="border: 1px solid black; padding: 8px; text-align: center;">BLEU</th> <th style="border: 1px solid black; padding: 8px; text-align: center;">ANLS</th> <th style="border: 1px solid black; padding: 8px; text-align: center;">LAVE</th> </tr> <tr> <td style="border: 1px solid black; padding: 8px; text-align: center;">Score</td> <td style="border: 1px solid black; padding: 8px; text-align: center;">0.1411</td> <td style="border: 1px solid black; padding: 8px; text-align: center;">0.0032</td> <td style="border: 1px solid black; padding: 8px; text-align: center;">0.002</td> <td style="border: 1px solid black; padding: 8px; text-align: center;">0.58</td> </tr> </table> ## Qualitative Examples <div align="center"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/5ljrlVqrHHB4VGRek7hJv.png" alt="VQA Evaluation" style="width:120%, border: none;"> </div> <p align="center"> <em> Figure 4: Llama rating and rationale for the generated and reference answers from Docmatix test subset. </em> </p> <div align="center"> <img src="https://cdn-uploads.huggingface.co/production/uploads/640e21ef3c82bd463ee5a76d/scly6WR_2Wvrk5qd05cx4.png" alt="VQA Evaluation" style="width:120%, border: none;"> </div> <p align="center"> <em> Figure 5: Llama rating and rationale for the generated and reference answers from Docmatix test subset. </em> </p> ## Are we too strict in evaluating VQA systems and do we need finetuning? 
We have approximately 50% accuracy gain when using LLMs to evaluate responses, indicating that the answers can be correct despite not adhering to a strict format. This suggests that our current evaluation metrics may be too rigid. It’s important to note that this is not a comprehensive research paper, and more ablation studies are needed to fully understand the effectiveness of different metrics on the evaluation of zero-shot performance on synthetic dataset. We hope this work serves as a starting point to broaden the current research focus on improving the evaluation of zero-shot vision-language models within the context of synthetic datasets and to explore more efficient approaches beyond prompt learning. ## References ``` @inproceedings{cascante2022simvqa, title={Simvqa: Exploring simulated environments for visual question answering}, author={Cascante-Bonilla, Paola and Wu, Hui and Wang, Letao and Feris, Rogerio S and Ordonez, Vicente}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={5056--5066}, year={2022} } @article{hu2024mplug, title={mplug-docowl 1.5: Unified structure learning for ocr-free document understanding}, author={Hu, Anwen and Xu, Haiyang and Ye, Jiabo and Yan, Ming and Zhang, Liang and Zhang, Bo and Li, Chen and Zhang, Ji and Jin, Qin and Huang, Fei and others}, journal={arXiv preprint arXiv:2403.12895}, year={2024} } @article{agrawal2022reassessing, title={Reassessing evaluation practices in visual question answering: A case study on out-of-distribution generalization}, author={Agrawal, Aishwarya and Kaji{\'c}, Ivana and Bugliarello, Emanuele and Davoodi, Elnaz and Gergely, Anita and Blunsom, Phil and Nematzadeh, Aida}, journal={arXiv preprint arXiv:2205.12191}, year={2022} } @inproceedings{li2023blip, title={Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models}, author={Li, Junnan and Li, Dongxu and Savarese, Silvio and Hoi, Steven}, booktitle={International conference on machine learning}, pages={19730--19742}, year={2023}, organization={PMLR} } @inproceedings{manas2024improving, title={Improving automatic vqa evaluation using large language models}, author={Ma{\~n}as, Oscar and Krojer, Benno and Agrawal, Aishwarya}, booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, volume={38}, number={5}, pages={4171--4179}, year={2024} } @article{li2023scigraphqa, title={Scigraphqa: A large-scale synthetic multi-turn question-answering dataset for scientific graphs}, author={Li, Shengzhi and Tajbakhsh, Nima}, journal={arXiv preprint arXiv:2308.03349}, year={2023} } ```
3
0
hf_public_repos
hf_public_repos/blog/fast-mac-diffusers.md
--- title: Swift 🧨Diffusers - Fast Stable Diffusion for Mac thumbnail: /blog/assets/fast-mac-diffusers/thumbnail.png authors: - user: pcuenq - user: reach-vb --- # Swift 🧨Diffusers: Fast Stable Diffusion for Mac Transform your text into stunning images with ease using Diffusers for Mac, a native app powered by state-of-the-art diffusion models. It leverages a bouquet of SoTA Text-to-Image models contributed by the community to the Hugging Face Hub, and converted to Core ML for blazingly fast performance. Our latest version, 1.1, is now available on the [Mac App Store](https://apps.apple.com/app/diffusers/id1666309574) with significant performance upgrades and user-friendly interface tweaks. It's a solid foundation for future feature updates. Plus, the app is fully open source with a permissive [license](https://github.com/huggingface/swift-coreml-diffusers/blob/main/LICENSE), so you can build on it too! Check out our GitHub repository at https://github.com/huggingface/swift-coreml-diffusers for more information. <img style="border:none;" alt="Screenshot showing Diffusers for Mac UI" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/fast-mac-diffusers/UI.png" /> ## What exactly is 🧨Diffusers for Mac anyway? The Diffusers app ([App Store](https://apps.apple.com/app/diffusers/id1666309574), [source code](https://github.com/huggingface/swift-coreml-diffusers)) is the Mac counterpart to our [🧨`diffusers` library](https://github.com/huggingface/diffusers). This library is written in Python with PyTorch, and uses a modular design to train and run diffusion models. It supports many different models and tasks, and is highly configurable and well optimized. It runs on Mac, too, using PyTorch's [`mps` accelerator](https://huggingface.co/docs/diffusers/optimization/mps), which is an alternative to `cuda` on Apple Silicon. Why would you want to run a native Mac app then? There are many reasons: - It uses Core ML models, instead of the original PyTorch ones. This is important because they allow for [additional optimizations](https://machinelearning.apple.com/research/stable-diffusion-coreml-apple-silicon) relevant to the specifics of Apple hardware, and because Core ML models can run on all the compute devices in your system: the CPU, the GPU and the Neural Engine, _at once_ – the Core ML framework will decide what portions of your model to run on each device to make it as fast as possible. PyTorch's `mps` device cannot use the Neural Engine. - It's a Mac app! We try to follow Apple's design language and guidelines so it feels at home on your Mac. No need to use the command line, create virtual environments or fix dependencies. - It's local and private. You don't need credits for online services and won't experience long queues – just generate all the images you want and use them for fun or work. Privacy is guaranteed: your prompts and images are yours to use, and will never leave your computer (unless you choose to share them). - [It's open source](https://github.com/huggingface/swift-coreml-diffusers), and it uses Swift, Swift UI and the latest languages and technologies for Mac and iOS development. If you are technically inclined, you can use Xcode to extend the code as you like. We welcome your contributions, too! ## Performance Benchmarks **TL;DR:** Depending on your computer Text-to-Image Generation can be up to **twice as fast** on Diffusers 1.1. 
⚡️ We've done a lot of testing on several Macs to determine the best combinations of compute devices that yield optimum performance. For some computers it's best to use the GPU, while others work better when the Neural Engine, or ANE, is engaged. Come check out our benchmarks. All the combinations use the CPU in addition to either the GPU or the ANE. | Model name | Benchmark | M1 8 GB | M1 16 GB | M2 24 GB | M1 Max 64 GB | |:---------------------------------:|-----------|:-------:|:---------:|:--------:|:------------:| | Cores (performance/GPU/ANE) | | 4/8/16 | 4/8/16 | 4/8/16 | 8/32/16 | | Stable Diffusion 1.5 | | | | | | | | GPU | 32.9 | 32.8 | 21.9 | 9 | | | ANE | 18.8 | 18.7 | 13.1 | 20.4 | | Stable Diffusion 2 Base | | | | | | | | GPU | 30.2 | 30.2 | 19.4 | 8.3 | | | ANE | 14.5 | 14.4 | 10.5 | 15.3 | | Stable Diffusion 2.1 Base | | | | | | | | GPU | 29.6 | 29.4 | 19.5 | 8.3 | | | ANE | 14.3 | 14.3 | 10.5 | 15.3 | | OFA-Sys/small-stable-diffusion-v0 | | | | | | | | GPU | 22.1 | 22.5 | 14.5 | 6.3 | | | ANE | 12.3 | 12.7 | 9.1 | 13.2 | We found that the amount of memory does not seem to play a big factor on performance, but the number of CPU and GPU cores does. For example, on a M1 Max laptop, the generation with GPU is a lot faster than with ANE. That's likely because it has 4 times the number of GPU cores (and twice as many CPU performance cores) than the standard M1 processor, for the same amount of neural engine cores. Conversely, the standard M1 processors found in Mac Minis are **twice as fast** using ANE than GPU. Interestingly, we tested the use of _both_ GPU and ANE accelerators together, and found that it does not improve performance with respect to the best results obtained with just one of them. The cut point seems to be around the hardware characteristics of the M1 Pro chip (8 performance cores, 14 or 16 GPU cores), which we don't have access to at the moment. 🧨Diffusers version 1.1 automatically selects the best accelerator based on the computer where the app runs. Some device configurations, like the "Pro" variants, are not offered by any cloud services we know of, so our heuristics could be improved for them. If you'd like to help us gather data to keep improving the out-of-the-box experience of our app, read on! ## Community Call for Benchmark Data We are interested in running more comprehensive performance benchmarks on Mac devices. If you'd like to help, we've created [this GitHub issue](https://github.com/huggingface/swift-coreml-diffusers/issues/31) where you can post your results. We'll use them to optimize performance on an upcoming version of the app. We are particularly interested in M1 Pro, M2 Pro and M2 Max architectures 🤗 <img style="border:none;display:block;margin-left:auto;margin-right:auto;" alt="Screenshot showing the Advanced Compute Units picker" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/fast-mac-diffusers/Advanced.png" /> ## Other Improvements in Version 1.1 In addition to the performance optimization and fixing a few bugs, we have focused on adding new features while trying to keep the UI as simple and clean as possible. Most of them are obvious (guidance scale, optionally disable the safety checker, allow generations to be canceled). Our favorite ones are the model download indicators, and a shortcut to reuse the seed from a previous generation in order to tweak the generation parameters. Version 1.1 also includes additional information about what the different generation settings do. 
We want 🧨Diffusers for Mac to make image generation as approachable as possible to all Mac users, not just technologists. ## Next Steps We believe there's a lot of untapped potential for image generation in the Apple ecosystem. In future updates we want to focus on the following: - Easy access to additional models from the Hub. Run any Dreambooth or fine-tuned model from the app, in a Mac-like way. - Release a version for iOS and iPadOS. There are many more ideas that we are considering. If you'd like to suggest your own, you are most welcome to do so [in our GitHub repo](https://github.com/huggingface/swift-coreml-diffusers).
4
0
hf_public_repos
hf_public_repos/blog/outlines-core.md
--- title: "Releasing Outlines-core 0.1.0: structured generation in Rust and Python" thumbnail: /blog/assets/outlines-core/thumbnail.gif authors: - user: bwillard guest: true org: dottxt - user: drbh - user: erikkaum - user: kc611 guest: true org: dottxt - user: remi guest: true org: dottxt - user: umut-sahin guest: true org: dottxt - user: willkurt guest: true org: dottxt --- dottxt and Hugging Face are excited to announce that we have been collaborating on [outlines-core](https://github.com/dottxt-ai/outlines-core), a Rust port of [outlines](https://github.com/dottxt-ai/outlines)’s core algorithms for structured generation. On top of getting reliable output from LLMs with outlines, this Rust port offers several further benefits to users of outlines: - Speed: Users can expect to see an 2x improvement in index compilation. - Separation of Concerns: It's now easier to incorporate structured generation into other libraries. `outlines-core` is very lightweight. - Portability: Having core algorithms in Rust allows binding for languages other than Python. These improvements should not only improve the performance for existing `outlines` users, but also dramatically increase the ways users can incorporate structured generation into their LLM workflows. `outlines-core` is now public, integrated in `outlines`, and the version `0.1.0` of the Python bindings are out. You can find the repo [here](https://github.com/dottxt-ai/outlines-core). ## A quick primer on structured generation 🧑‍🎓 ### How it works Structured generation means that your LLM is guaranteed to follow a desired format. This could be JSON, a Pydantic Model, a regular expression or a context-free grammar. The key is that structured generation forbids the 'wrong' tokens from being generated. Let’s take an extremely simple example. The LLM should generate a boolean, “true” or “false”. And nothing more. For the sake of illustration, let’s say that LLMs generate characters instead of tokens. So the first character is `"`, we can just skip the forward pass. For the second, we don’t need to sample from all possible characters. The LLM should just choose between `t` or `f`. <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/outlines-core/graph.png"><br> </p> After that, regardless of the path we take, there is only one valid next character. If the LLM chose `t` as the first character, then it has to follow with `r`, `u` and `e`. And similarly if it chose `f` it follows with `a`, `l`, `s`, `e`. And will choose the last `"` as the final character regardless of the path. There is of course more under the hood, for more in-depth coverage we recommend this [dottxt blog](https://blog.dottxt.co/coalescence.html) and the [associated paper on arxiv](https://arxiv.org/abs/2307.09702). ### Why it’s important It might not immediately be obvious how amazing structured generation can be. The first use-case many think of is “nice, now my LLM can return valid JSON, so I can treat it as an API and serialize/deserialize JSON reliably”. But that’s just scratching the surface. When you think about it, structure is everywhere, even in places where you least expect it like the [GSM8K benchmark](https://blog.dottxt.co/performance-gsm8k.html). 
These are just a [few examples](https://dottxt-ai.github.io/outlines/cookbook/) of what structured generation enables: - Generating [synthetic data](https://dottxt-ai.github.io/outlines/latest/cookbook/dating_profiles/), (there's also an [integration with Distilabel](https://distilabel.argilla.io/dev/sections/pipeline_samples/examples/llama_cpp_with_outlines/) for this) - Extracting information from documents and images. - Function [calling/building agents](https://blog.dottxt.co/oss-v-gpt4.html) - [Chain of Thought](https://dottxt-ai.github.io/outlines/latest/cookbook/chain_of_thought/) - Making sure your LLM outputs a [valid tic-tac-toe board](https://x.com/dottxtai/status/1840826952577421646) - Or ever [generating virtual worlds!](https://github.com/dottxt-ai/demos/tree/main/lore-generator) And, perhaps more surprising, it reduces the sensitivity of evaluations to the [specific prompt being used](https://huggingface.co/blog/evaluation-structured-outputs) and the [number of shots](https://blog.dottxt.co/prompt-efficiency.html). Apart from the amazing tricks that structure gives you, it’s also more performant. The dottxt blog has many good articles with performance benchmarks. ## Why rewrite in Rust? 🦀 ### Speed Probably the first thing that comes to your mind when you hear “rewrite in Rust” is performance. And yes, that’s the case for `outlines-core` as well. Several key parts are yet to be moved over to Rust, and despite that, we already see an [average 2x improvement](https://github.com/dottxt-ai/benchmarks) in compilation speed. Before the Rust port, Outlines used Numba to accelerate the building of the index. While Numba is fast (the runtime performance is comparable to Rust), the JIT-compilation of the Numba functions added a source of latency during the first run, which was a source of frustration for many users. Using Rust means we can compile the index building functions ahead of time, adding no latency during the first run. While this was not important in a production context (since the first run could anyways be done as part of deployment), it can make a huge difference during the experimentation phase! ### Safety and Reliability One of the main reasons for rewriting Outlines in Rust is the emphasis on safety and reliability that Rust brings to the table. Rust's strong static typing, combined with Rust's ownership model, eliminate entire classes of bugs, such as null pointer dereferences and data races in concurrent code. This leads to more robust and secure software. In the context of Outlines, safety is crucial. Structured generation often involves complex data structures and manipulations, especially when dealing with high-performance inference engines. By leveraging Rust's safety guarantees, we reduce the risk of runtime errors and undefined behaviors that can arise from memory mismanagement. Additionally, Rust's compile-time checks encourage developers to write cleaner and more maintainable code. This improves the current codebase and makes future development more efficient. New contributors can onboard more quickly, and the code is easier to audit and verify for correctness. ### Separation of concerns Outlines was designed to do more than providing the core algorithms for structured generation. Among other things, it includes integrations to other libraries like `transformers` which mean the library packs many dependencies. 
Separating the core algorithms from the Outlines library means that other libraries wishing to include structured generation can do so by importing a very lightweight library. So we can imagine in the near future libraries such as `transformers` and `llama-cpp-python` integrating structured generation directly. This allows the dottxt team to focus on the core algorithms. ### Portability Most of LLM training is written in Python, but inference is slightly different. It happens on many different devices, on specialized servers and is written in a range of programming languages. This is why portability also matters for structured generation. By having the core functionality of `outlines` written in rust, we can now create bindings to other languages. For example, this port makes the integration into the [text-generation-inference](https://github.com/huggingface/text-generation-inference) much smoother. TGI’s server logic is written in Rust, and we want to avoid having to call Python code as much as we possibly can. It also means libraries like `mistral.rs` or models implemented using [candle](https://github.com/huggingface/candle) can benefit from Outlines’s performance and capabilities. In the future we plan to explore bindings to JS/TS, allowing outlines to be used in transformers-js. Or potentially Swift bindings, making outlines natively usable on Apple devices. But for now the focus is going to be on the Python bindings, and continuing to make `outlines-core`’s feature set complete by expanding support for the JSON Schema specification. ## Contribute Do you like working with structured generation, parsers, making LLMs output only valid JSON? Star the [library](https://github.com/dottxt-ai/outlines-core), tweet about it, join in and contribute! Share your work on Twitter, and with [dottxt’s](https://discord.com/invite/R9DSu34mGd) and Hugging Face's community.
5
0
hf_public_repos
hf_public_repos/blog/intel.md
--- title: "Intel and Hugging Face Partner to Democratize Machine Learning Hardware Acceleration" thumbnail: /blog/assets/80_intel/01.png authors: - user: juliensimon --- # Intel and Hugging Face Partner to Democratize Machine Learning Hardware Acceleration ![image](assets/80_intel/01.png) The mission of Hugging Face is to democratize good machine learning and maximize its positive impact across industries and society. Not only do we strive to advance Transformer models, but we also work hard on simplifying their adoption. Today, we're excited to announce that Intel has officially joined our [Hardware Partner Program](https://huggingface.co/hardware). Thanks to the [Optimum](https://github.com/huggingface/optimum-intel) open-source library, Intel and Hugging Face will collaborate to build state-of-the-art hardware acceleration to train, fine-tune and predict with Transformers. Transformer models are increasingly large and complex, which can cause production challenges for latency-sensitive applications like search or chatbots. Unfortunately, latency optimization has long been a hard problem for Machine Learning (ML) practitioners. Even with deep knowledge of the underlying framework and hardware platform, it takes a lot of trial and error to figure out which knobs and features to leverage. Intel provides a complete foundation for accelerated AI with the Intel Xeon Scalable CPU platform and a wide range of hardware-optimized AI software tools, frameworks, and libraries. Thus, it made perfect sense for Hugging Face and Intel to join forces and collaborate on building powerful model optimization tools that let users achieve the best performance, scale, and productivity on Intel platforms. “*We’re excited to work with Hugging Face to bring the latest innovations of Intel Xeon hardware and Intel AI software to the Transformers community, through open source integration and integrated developer experiences.*”, says Wei Li, Intel Vice President & General Manager, AI and Analytics. In recent months, Intel and Hugging Face collaborated on scaling Transformer workloads. We published detailed tuning guides and benchmarks on inference ([part 1](https://huggingface.co/blog/bert-cpu-scaling-part-1), [part 2](https://huggingface.co/blog/bert-cpu-scaling-part-2)) and achieved [single-digit millisecond latency](https://huggingface.co/blog/infinity-cpu-performance) for DistilBERT on the latest Intel Xeon Ice Lake CPUs. On the training side, we added support for [Habana Gaudi](https://huggingface.co/blog/getting-started-habana) accelerators, which deliver up to 40% better price-performance than GPUs. The next logical step was to expand on this work and share it with the ML community. Enter the [Optimum Intel](https://github.com/huggingface/optimum-intel) open source library! Let’s take a deeper look at it. ## Get Peak Transformers Performance with Optimum Intel [Optimum](https://github.com/huggingface/optimum) is an open-source library created by Hugging Face to simplify Transformer acceleration across a growing range of training and inference devices. Thanks to built-in optimization techniques, you can start accelerating your workloads in minutes, using ready-made scripts, or applying minimal changes to your existing code. Beginners can use Optimum out of the box with excellent results. Experts can keep tweaking for maximum performance. 
[Optimum Intel](https://github.com/huggingface/optimum-intel) is part of Optimum and builds on top of the [Intel Neural Compressor](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) (INC). INC is an [open-source library](https://github.com/intel/neural-compressor) that delivers unified interfaces across multiple deep learning frameworks for popular network compression technologies, such as quantization, pruning, and knowledge distillation. This tool supports automatic accuracy-driven tuning strategies to help users quickly build the best quantized model. With Optimum Intel, you can apply state-of-the-art optimization techniques to your Transformers with minimal effort. Let’s look at a complete example. ## Case study: Quantizing DistilBERT with Optimum Intel In this example, we will run post-training quantization on a DistilBERT model fine-tuned for classification. Quantization is a process that shrinks memory and compute requirements by reducing the bit width of model parameters. For example, you can often replace 32-bit floating-point parameters with 8-bit integers at the expense of a small drop in prediction accuracy. We have already fine-tuned the original model to classify product reviews for shoes according to their star rating (from 1 to 5 stars). You can view this [model](https://huggingface.co/juliensimon/distilbert-amazon-shoe-reviews) and its [quantized](https://huggingface.co/juliensimon/distilbert-amazon-shoe-reviews-quantized?) version on the Hugging Face hub. You can also test the original model in this [Space](https://huggingface.co/spaces/juliensimon/amazon-shoe-reviews-spaces). Let’s get started! All code is available in this [notebook](https://gitlab.com/juliensimon/huggingface-demos/-/blob/main/amazon-shoes/03_optimize_inc_quantize.ipynb). As usual, the first step is to install all required libraries. It’s worth mentioning that we have to work with a CPU-only version of PyTorch for the quantization process to work correctly. ``` pip -q uninstall torch -y pip -q install torch==1.11.0+cpu --extra-index-url https://download.pytorch.org/whl/cpu pip -q install transformers datasets optimum[neural-compressor] evaluate --upgrade ``` Then, we prepare an evaluation dataset to assess model performance during quantization. Starting from the dataset we used to fine-tune the original model, we only keep a few thousand reviews and their labels and save them to local storage. Next, we load the original model, its tokenizer, and the evaluation dataset from the Hugging Face hub. ``` from datasets import load_dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer model_name = "juliensimon/distilbert-amazon-shoe-reviews" model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=5) tokenizer = AutoTokenizer.from_pretrained(model_name) eval_dataset = load_dataset("prashantgrao/amazon-shoe-reviews", split="test").select(range(300)) ``` Next, we define an evaluation function that computes model metrics on the evaluation dataset. This allows the Optimum Intel library to compare these metrics before and after quantization. For this purpose, the Hugging Face [evaluate](https://github.com/huggingface/evaluate/) library is very convenient! 
``` import evaluate def eval_func(model): task_evaluator = evaluate.evaluator("text-classification") results = task_evaluator.compute( model_or_pipeline=model, tokenizer=tokenizer, data=eval_dataset, metric=evaluate.load("accuracy"), label_column="labels", label_mapping=model.config.label2id, ) return results["accuracy"] ``` We then set up the quantization job using a configuration object. You can find details on this configuration in the Neural Compressor [documentation](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md). Here, we go for post-training dynamic quantization with an acceptable accuracy drop of 5%. If accuracy drops by more than the allowed 5%, different parts of the model will be quantized until either an acceptable accuracy drop is reached or the maximum number of trials, here set to 10, is exhausted. ``` from neural_compressor.config import AccuracyCriterion, PostTrainingQuantConfig, TuningCriterion tuning_criterion = TuningCriterion(max_trials=10) accuracy_criterion = AccuracyCriterion(tolerable_loss=0.05) # Load the quantization configuration detailing the quantization we wish to apply quantization_config = PostTrainingQuantConfig( approach="dynamic", accuracy_criterion=accuracy_criterion, tuning_criterion=tuning_criterion, ) ``` We can now launch the quantization job and save the resulting model and its configuration file to local storage. ``` from neural_compressor.config import PostTrainingQuantConfig from optimum.intel.neural_compressor import INCQuantizer # The directory where the quantized model will be saved save_dir = "./model_inc" quantizer = INCQuantizer.from_pretrained(model=model, eval_fn=eval_func) quantizer.quantize(quantization_config=quantization_config, save_directory=save_dir) ``` The log tells us that Optimum Intel has quantized 38 ```Linear``` and 2 ```Embedding``` operators. ``` [INFO] |******Mixed Precision Statistics*****| [INFO] +----------------+----------+---------+ [INFO] | Op Type | Total | INT8 | [INFO] +----------------+----------+---------+ [INFO] | Embedding | 2 | 2 | [INFO] | Linear | 38 | 38 | [INFO] +----------------+----------+---------+ ``` Comparing the first layer of the original model (```model.distilbert.transformer.layer[0]```) and its quantized version (```inc_model.distilbert.transformer.layer[0]```), we see that ```Linear``` has indeed been replaced by ```DynamicQuantizedLinear```, its quantized equivalent.
``` # Original model TransformerBlock( (attention): MultiHeadSelfAttention( (dropout): Dropout(p=0.1, inplace=False) (q_lin): Linear(in_features=768, out_features=768, bias=True) (k_lin): Linear(in_features=768, out_features=768, bias=True) (v_lin): Linear(in_features=768, out_features=768, bias=True) (out_lin): Linear(in_features=768, out_features=768, bias=True) ) (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True) (ffn): FFN( (dropout): Dropout(p=0.1, inplace=False) (lin1): Linear(in_features=768, out_features=3072, bias=True) (lin2): Linear(in_features=3072, out_features=768, bias=True) ) (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True) ) ``` ``` # Quantized model TransformerBlock( (attention): MultiHeadSelfAttention( (dropout): Dropout(p=0.1, inplace=False) (q_lin): DynamicQuantizedLinear(in_features=768, out_features=768, dtype=torch.qint8, qscheme=torch.per_channel_affine) (k_lin): DynamicQuantizedLinear(in_features=768, out_features=768, dtype=torch.qint8, qscheme=torch.per_channel_affine) (v_lin): DynamicQuantizedLinear(in_features=768, out_features=768, dtype=torch.qint8, qscheme=torch.per_channel_affine) (out_lin): DynamicQuantizedLinear(in_features=768, out_features=768, dtype=torch.qint8, qscheme=torch.per_channel_affine) ) (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True) (ffn): FFN( (dropout): Dropout(p=0.1, inplace=False) (lin1): DynamicQuantizedLinear(in_features=768, out_features=3072, dtype=torch.qint8, qscheme=torch.per_channel_affine) (lin2): DynamicQuantizedLinear(in_features=3072, out_features=768, dtype=torch.qint8, qscheme=torch.per_channel_affine) ) (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True) ) ``` Very well, but how does this impact accuracy and prediction time? Before and after each quantization step, Optimum Intel runs the evaluation function on the current model. The accuracy of the quantized model is now a bit lower (``` 0.546```) than the original model (```0.574```). We also see that the evaluation step of the quantized model was 1.34x faster than the original model. Not bad for a few lines of code! ``` [INFO] |**********************Tune Result Statistics**********************| [INFO] +--------------------+----------+---------------+------------------+ [INFO] | Info Type | Baseline | Tune 1 result | Best tune result | [INFO] +--------------------+----------+---------------+------------------+ [INFO] | Accuracy | 0.5740 | 0.5460 | 0.5460 | [INFO] | Duration (seconds) | 13.1534 | 9.7695 | 9.7695 | [INFO] +--------------------+----------+---------------+------------------+ ``` You can find the resulting [model](https://huggingface.co/juliensimon/distilbert-amazon-shoe-reviews-quantized) hosted on the Hugging Face hub. To load a quantized model hosted locally or on the 🤗 hub, you can do as follows : ``` from optimum.intel.neural_compressor import INCModelForSequenceClassification inc_model = INCModelForSequenceClassification.from_pretrained(save_dir) ``` ## We’re only getting started In this example, we showed you how to easily quantize models post-training with Optimum Intel, and that’s just the beginning. The library supports other types of quantization as well as pruning, a technique that zeroes or removes model parameters that have little or no impact on the predicted outcome. We are excited to partner with Intel to bring Hugging Face users peak efficiency on the latest Intel Xeon CPUs and Intel AI libraries. 
Please [give Optimum Intel a star](https://github.com/huggingface/optimum-intel) to get updates, and stay tuned for many upcoming features! *Many thanks to [Ella Charlaix](https://github.com/echarlaix) for her help on this post.*
6
0
hf_public_repos
hf_public_repos/blog/getting-started-with-embeddings.md
--- title: 'Getting Started With Embeddings' thumbnail: /blog/assets/80_getting_started_with_embeddings/thumbnail.png authors: - user: espejelomar --- # Getting Started With Embeddings Check out this tutorial with the Notebook Companion: <a target="_blank" href="https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/80_getting_started_with_embeddings.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> ## Understanding embeddings An embedding is a numerical representation of a piece of information, for example, text, documents, images, audio, etc. The representation captures the semantic meaning of what is being embedded, making it robust for many industry applications. Given the text "What is the main benefit of voting?", an embedding of the sentence could be represented in a vector space, for example, as a list of 384 numbers (such as [0.84, 0.42, ..., 0.02]). Since this list captures the meaning, we can do exciting things, like calculating the distance between different embeddings to determine how well the meanings of two sentences match. Embeddings are not limited to text! You can also create an embedding of an image (for example, a list of 384 numbers) and compare it with a text embedding to determine if a sentence describes the image. This concept underpins powerful systems for image search, classification, description, and more! How are embeddings generated? The open-source library called [Sentence Transformers](https://www.sbert.net/index.html) allows you to create state-of-the-art embeddings from images and text for free. This blog shows an example with this library. ## What are embeddings for? > "[...] once you understand this ML multitool (embedding), you'll be able to build everything from search engines to recommendation systems to chatbots and a whole lot more. You don't have to be a data scientist with ML expertise to use them, nor do you need a huge labeled dataset." - [Dale Markowitz, Google Cloud](https://cloud.google.com/blog/topics/developers-practitioners/meet-ais-multitool-vector-embeddings). Once a piece of information (a sentence, a document, an image) is embedded, the creativity starts; several interesting industrial applications use embeddings. E.g., Google Search uses embeddings to [match text to text and text to images](https://cloud.google.com/blog/topics/developers-practitioners/meet-ais-multitool-vector-embeddings); Snapchat uses them to "[serve the right ad to the right user at the right time](https://eng.snap.com/machine-learning-snap-ad-ranking)"; and Meta (Facebook) uses them for [their social search](https://research.facebook.com/publications/embedding-based-retrieval-in-facebook-search/). Before they could get intelligence from embeddings, these companies had to embed their pieces of information. An embedded dataset allows algorithms to search quickly, sort, group, and more. However, it can be expensive and technically complicated. In this post, we use simple open-source tools to show how easy it can be to embed and analyze a dataset. ## Getting started with embeddings We will create a small Frequently Asked Questions (FAQs) engine: receive a query from a user and identify which FAQ is the most similar. We will use the [US Social Security Medicare FAQs](https://faq.ssa.gov/en-US/topic/?id=CAT-01092). But first, we need to embed our dataset (other texts use the terms encode and embed interchangeably). 
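To make the idea concrete before we continue, here is a minimal local sketch using the Sentence Transformers library directly; the example sentences and the similarity comparison are illustrative assumptions on our part, and the rest of this tutorial will use the Inference API rather than a local model:

```py
from sentence_transformers import SentenceTransformer, util

# Load a small pre-trained embedding model (the same one we will use through the Inference API below)
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

sentences = [
    "What is the main benefit of voting?",
    "Why is voting important?",
    "How do I cook pasta?",
]

# Each sentence is encoded as a 384-dimensional vector
embeddings = model.encode(sentences)
print(embeddings.shape)  # (3, 384)

# Cosine similarity: semantically related sentences get a higher score
print(util.cos_sim(embeddings[0], embeddings[1]))  # relatively high
print(util.cos_sim(embeddings[0], embeddings[2]))  # relatively low
```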
The Hugging Face Inference API allows us to easily embed a dataset with a quick POST call. Since the embeddings capture the semantic meaning of the questions, it is possible to compare different embeddings and see how different or similar they are. Thanks to this, you can get the most similar embedding to a query, which is equivalent to finding the most similar FAQ. Check out our [semantic search tutorial](https://huggingface.co/spaces/sentence-transformers/embeddings-semantic-search) for a more detailed explanation of how this mechanism works. In a nutshell, we will: 1. Embed Medicare's FAQs using the Inference API. 2. Upload the embedded questions to the Hub for free hosting. 3. Compare a customer's query to the embedded dataset to identify which is the most similar FAQ. ## 1. Embedding a dataset The first step is selecting an existing pre-trained model for creating the embeddings. We can choose a model from the [Sentence Transformers library](https://huggingface.co/sentence-transformers). In this case, let's use the ["sentence-transformers/all-MiniLM-L6-v2"](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) because it's a small but powerful model. In a future post, we will examine other models and their trade-offs. Log in to the Hub. You must create a write token in your [Account Settings](http://hf.co/settings/tokens). We will store the write token in `hf_token`. ```py model_id = "sentence-transformers/all-MiniLM-L6-v2" hf_token = "get your token in http://hf.co/settings/tokens" ``` To generate the embeddings you can use the `https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}` endpoint with the headers `{"Authorization": f"Bearer {hf_token}"}`. Here is a function that receives a dictionary with the texts and returns a list with embeddings. ```py import requests api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}" headers = {"Authorization": f"Bearer {hf_token}"} ``` The first time you generate the embeddings, it may take a while (approximately 20 seconds) for the API to return them. We use the `retry` decorator (install with `pip install retry`) so that if the first call to `output = query(texts)` doesn't work, it waits 10 seconds and retries up to three times (a sketch of this is shown at the end of this section). This happens because, on the first request, the model needs to be downloaded and installed on the server, but subsequent calls are much faster. ```py def query(texts): response = requests.post(api_url, headers=headers, json={"inputs": texts, "options":{"wait_for_model":True}}) return response.json() ``` The current API does not enforce strict rate limitations. Instead, Hugging Face balances the loads evenly between all our available resources and favors steady flows of requests. If you need to embed several texts or images, the [Hugging Face Accelerated Inference API](https://huggingface.co/docs/api-inference/index) would speed up inference and let you choose between using a CPU or GPU. 
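The `query` function above is shown without the retry logic it describes. Here is one possible sketch of how the `retry` decorator could be applied, reusing the `api_url` and `headers` defined above; raising on HTTP errors so the decorator has something to catch is an assumption on our part, and the notebook companion may implement this differently:

```py
from retry import retry
import requests

@retry(tries=3, delay=10)
def query(texts):
    response = requests.post(api_url, headers=headers, json={"inputs": texts, "options": {"wait_for_model": True}})
    # Raise on HTTP errors (e.g. while the model is still loading) so that
    # the decorator waits 10 seconds and retries, up to 3 tries in total
    response.raise_for_status()
    return response.json()
```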
```py texts = ["How do I get a replacement Medicare card?", "What is the monthly premium for Medicare Part B?", "How do I terminate my Medicare Part B (medical insurance)?", "How do I sign up for Medicare?", "Can I sign up for Medicare Part B if I am working and have health insurance through an employer?", "How do I sign up for Medicare Part B if I already have Part A?", "What are Medicare late enrollment penalties?", "What is Medicare and who can get it?", "How can I get help with my Medicare Part A and Part B premiums?", "What are the different parts of Medicare?", "Will my Medicare premiums be higher because of my higher income?", "What is TRICARE ?", "Should I sign up for Medicare Part B if I have Veterans' Benefits?"] output = query(texts) ``` As a response, you get back a list of lists. Each list contains the embedding of a FAQ. The model, ["sentence-transformers/all-MiniLM-L6-v2"](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2), is encoding the input questions to 13 embeddings of size 384 each. Let's convert the list to a Pandas `DataFrame` of shape (13x384). ```py import pandas as pd embeddings = pd.DataFrame(output) ``` It looks similar to this matrix: ```py [[-0.02388945 0.05525852 -0.01165488 ... 0.00577787 0.03409787 -0.0068891 ] [-0.0126876 0.04687412 -0.01050217 ... -0.02310316 -0.00278466 0.01047371] [ 0.00049438 0.11941205 0.00522949 ... 0.01687654 -0.02386115 0.00526433] ... [-0.03900796 -0.01060951 -0.00738271 ... -0.08390449 0.03768405 0.00231361] [-0.09598278 -0.06301168 -0.11690582 ... 0.00549841 0.1528919 0.02472013] [-0.01162949 0.05961934 0.01650903 ... -0.02821241 -0.00116556 0.0010672 ]] ``` ## 2. Host embeddings for free on the Hugging Face Hub 🤗 Datasets is a library for quickly accessing and sharing datasets. Let's host the embeddings dataset in the Hub using the user interface (UI). Then, anyone can load it with a single line of code. You can also use the terminal to share datasets; see [the documentation](https://huggingface.co/docs/datasets/share#share) for the steps. In the [notebook companion](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/80_getting_started_with_embeddings.ipynb) of this entry, you will be able to use the terminal to share the dataset. If you want to skip this section, check out the [`ITESM/embedded_faqs_medicare` repo](https://huggingface.co/datasets/ITESM/embedded_faqs_medicare) with the embedded FAQs. First, we export our embeddings from a Pandas `DataFrame` to a CSV. You can save your dataset in any way you prefer, e.g., zip or pickle; you don't need to use Pandas or CSV. Since our embeddings file is not large, we can store it in a CSV, which is easily inferred by the `datasets.load_dataset()` function we will employ in the next section (see the [Datasets documentation](https://huggingface.co/docs/datasets/about_dataset_load#build-and-load)), i.e., we don't need to create a loading script. We will save the embeddings with the name `embeddings.csv`. ```py embeddings.to_csv("embeddings.csv", index=False) ``` Follow the next steps to host `embeddings.csv` in the Hub. * Click on your user in the top right corner of the [Hub UI](https://huggingface.co/). * Create a dataset with "New dataset." ![](assets/80_getting_started_with_embeddings/SelectDataset.png) * Choose the Owner (organization or individual), name, and license of the dataset. Select if you want it to be private or public. Create the dataset. 
![](assets/80_getting_started_with_embeddings/createDataset.png) * Go to the "Files" tab (screenshot below) and click "Add file" and "Upload file." ![](assets/80_getting_started_with_embeddings/AddFile.png) * Finally, drag or upload the dataset, and commit the changes. ![](assets/80_getting_started_with_embeddings/UploadFile.png) Now the dataset is hosted on the Hub for free. You (or whoever you want to share the embeddings with) can quickly load them. Let's see how. ## 3. Get the most similar Frequently Asked Questions to a query Suppose a Medicare customer asks, "How can Medicare help me?". We will **find** which of our FAQs could best answer our user query. We will create an embedding of the query that can represent its semantic meaning. We then compare it to each embedding in our FAQ dataset to identify which is closest to the query in vector space. Install the 🤗 Datasets library with `pip install datasets`. Then, load the embedded dataset from the Hub and convert it to a PyTorch `FloatTensor`. Note that this is not the only way to operate on a `Dataset`; for example, you could use NumPy, Tensorflow, or SciPy (refer to the [Documentation](https://huggingface.co/docs/datasets/loading)). If you want to practice with a real dataset, the [`ITESM/embedded_faqs_medicare`](https://huggingface.co/datasets/ITESM/embedded_faqs_medicare) repo contains the embedded FAQs, or you can use the [companion notebook](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/80_getting_started_with_embeddings.ipynb) to this blog. ```py import torch from datasets import load_dataset faqs_embeddings = load_dataset('namespace/repo_name') dataset_embeddings = torch.from_numpy(faqs_embeddings["train"].to_pandas().to_numpy()).to(torch.float) ``` We use the query function we defined before to embed the customer's question and convert it to a PyTorch `FloatTensor` to operate over it efficiently. Note that after the embedded dataset is loaded, we could use the `add_faiss_index` and `search` methods of a `Dataset` to identify the closest FAQ to an embedded query using the [faiss library](https://github.com/facebookresearch/faiss). Here is a [nice tutorial of the alternative](https://huggingface.co/docs/datasets/faiss_es). ```py question = ["How can Medicare help me?"] output = query(question) query_embeddings = torch.FloatTensor(output) ``` You can use the `util.semantic_search` function in the Sentence Transformers library to identify which of the FAQs are closest (most similar) to the user's query. This function uses cosine similarity as the default function to determine the proximity of the embeddings. However, you could also use other functions that measure the distance between two points in a vector space, for example, the dot product. Install `sentence-transformers` with `pip install -U sentence-transformers`, and search for the five most similar FAQs to the query. ```py from sentence_transformers.util import semantic_search hits = semantic_search(query_embeddings, dataset_embeddings, top_k=5) ``` `util.semantic_search` identifies how close each of the 13 FAQs is to the customer query and returns a list of dictionaries with the top `top_k` FAQs. 
`hits` looks like this: ```py [{'corpus_id': 8, 'score': 0.75653076171875}, {'corpus_id': 7, 'score': 0.7418993711471558}, {'corpus_id': 3, 'score': 0.7252674102783203}, {'corpus_id': 9, 'score': 0.6735571622848511}, {'corpus_id': 10, 'score': 0.6505177617073059}] ``` The values in `corpus_id` allow us to index the list of `texts` we defined in the first section and get the five most similar FAQs: ```py print([texts[hits[0][i]['corpus_id']] for i in range(len(hits[0]))]) ``` Here are the 5 FAQs that come closest to the customer's query: ```py ['How can I get help with my Medicare Part A and Part B premiums?', 'What is Medicare and who can get it?', 'How do I sign up for Medicare?', 'What are the different parts of Medicare?', 'Will my Medicare premiums be higher because of my higher income?'] ``` Nice! Here we used PyTorch and Sentence Transformers as our main numerical tools. However, we could have defined the cosine similarity and ranking functions ourselves using tools such as NumPy and SciPy. ## Additional resources to keep learning If you want to know more about the Sentence Transformers library: - The [Hub Organization](https://huggingface.co/sentence-transformers) for all the new models and instructions on how to download models. - The [Nils Reimers tweet](https://twitter.com/Nils_Reimers/status/1487014195568775173) comparing Sentence Transformer models with GPT-3 Embeddings. Spoiler alert: the Sentence Transformers are awesome! - The [Sentence Transformers documentation](https://www.sbert.net/). - [Nima's thread](https://twitter.com/NimaBoscarino/status/1535331680805801984) on recent research. Thanks for reading!
7
0
hf_public_repos
hf_public_repos/blog/mms_adapters.md
--- title: "Fine-Tune MMS Adapter Models for low-resource ASR" thumbnail: /blog/assets/151_mms/mms_map.png authors: - user: patrickvonplaten --- # **Fine-tuning MMS Adapter Models for Multi-Lingual ASR** <a target="_blank" href="https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_MMS_on_Common_Voice.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> ***New (06/2023)***: *This blog post is strongly inspired by ["Fine-tuning XLS-R on Multi-Lingual ASR"](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2)* and can be seen as an improved version of it. **Wav2Vec2** is a pretrained model for Automatic Speech Recognition (ASR) and was released in [September 2020](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) by *Alexei Baevski, Michael Auli, and Alex Conneau*. Soon after the strong performance of Wav2Vec2 was demonstrated on one of the most popular English datasets for ASR, called [LibriSpeech](https://huggingface.co/datasets/librispeech_asr), *Facebook AI* presented two multi-lingual versions of Wav2Vec2, called [XLSR](https://arxiv.org/abs/2006.13979) and [XLM-R](https://ai.facebook.com/blog/-xlm-r-state-of-the-art-cross-lingual-understanding-through-self-supervision/), capable of recognising speech in up to 128 languages. XLSR stands for *cross-lingual speech representations* and refers to the model's ability to learn speech representations that are useful across multiple languages. Meta AI's most recent release, [**Massive Multilingual Speech (MMS)**](https://ai.facebook.com/blog/multilingual-model-speech-recognition/) by *Vineel Pratap, Andros Tjandra, Bowen Shi, et al.* takes multi-lingual speech representations to a new level. Over 1,100 spoken languages can be identified, transcribed and generated with the various [language identification, speech recognition, and text-to-speech checkpoints released](https://huggingface.co/models?other=mms). In this blog post, we show how MMS's Adapter training achieves astonishingly low word error rates after just 10-20 minutes of fine-tuning. For low-resource languages, we **strongly** recommend using MMS' Adapter training as opposed to fine-tuning the whole model as is done in ["Fine-tuning XLS-R on Multi-Lingual ASR"](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2). In our experiments, MMS' Adapter training is both more memory efficient, more robust and yields better performance for low-resource languages. For medium to high resource languages it can still be advantegous to fine-tune the whole checkpoint instead of using Adapter layers though. ![wav2vec2_structure](/blog/assets/151_mms/mms_map.png) ## **Preserving the world's language diversity** According to https://www.ethnologue.com/ around 3000, or 40% of all "living" languages, are endangered due to fewer and fewer native speakers. This trend will only continue in an increasingly globalized world. **MMS** is capable of transcribing many languages which are endangered, such as *Ari* or *Kaivi*. In the future, MMS can play a vital role in keeping languages alive by helping the remaining speakers to create written records and communicating in their native tongue. To adapt to 1000+ different vocabularies, **MMS** uses of Adapters - a training method where only a small fraction of model weights are trained. Adapter layers act like linguistic bridges, enabling the model to leverage knowledge from one language when deciphering another. 
## **Fine-tuning MMS** **MMS** unsupervised checkpoints were pre-trained on more than **half a million** hours of audio in over **1,400** languages; the checkpoints range from 300 million to one billion parameters. You can find the pretrained-only checkpoints on the 🤗 Hub for model sizes of 300 million parameters (300M) and one billion parameters (1B): - [**`mms-300m`**](https://huggingface.co/facebook/mms-300m) - [**`mms-1b`**](https://huggingface.co/facebook/mms-1b) *Note*: If you want to fine-tune the base models, you can do so in the exact same way as shown in ["Fine-tuning XLS-R on Multi-Lingual ASR"](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2). Similar to [BERT's masked language modeling objective](http://jalammar.github.io/illustrated-bert/), MMS learns contextualized speech representations by randomly masking feature vectors before passing them to a transformer network during self-supervised pre-training. For ASR, the pretrained [`MMS-1B` checkpoint](https://huggingface.co/facebook/mms-1b) was further fine-tuned in a supervised fashion on 1000+ languages with a joint vocabulary output layer. As a final step, the joint vocabulary output layer was thrown away and language-specific adapter layers were kept instead. Each adapter layer contains **just** ~2.5M weights, consisting of small linear projection layers for each attention block as well as a language-specific vocabulary output layer. Three **MMS** checkpoints fine-tuned for speech recognition (ASR) have been released. They include 102, 1107, and 1162 adapter weights respectively (one for each language): - [**`mms-1b-fl102`**](https://huggingface.co/facebook/mms-1b-fl102) - [**`mms-1b-l1107`**](https://huggingface.co/facebook/mms-1b-l1107) - [**`mms-1b-all`**](https://huggingface.co/facebook/mms-1b-all) You can see that the base models are saved (as usual) as a [`model.safetensors` file](https://huggingface.co/facebook/mms-1b-all/blob/main/model.safetensors), but in addition these repositories store many adapter weights, *e.g.* under the name [`adapter.fra.safetensors`](https://huggingface.co/facebook/mms-1b-all/blob/main/adapter.fra.safetensors) for French. The Hugging Face docs [explain very well how such checkpoints can be used for inference](https://huggingface.co/docs/transformers/main/en/model_doc/mms#loading), so in this blog post we will instead focus on learning how we can efficiently train highly performant adapter models based on any of the released ASR checkpoints. ## Training adaptive weights In machine learning, adapters are a method used to fine-tune pre-trained models while keeping the original model parameters unchanged. They do this by inserting small, trainable modules, called [adapter layers](https://arxiv.org/pdf/1902.00751.pdf), between the pre-existing layers of the model, which then adapt the model to a specific task without requiring extensive retraining. Adapters have a long history in speech recognition and especially **speaker recognition**. In speaker recognition, adapters have been effectively used to tweak pre-existing models to recognize individual speaker idiosyncrasies, as highlighted in [Gales and Woodland's (1996)](https://www.isca-speech.org/archive_v0/archive_papers/icslp_1996/i96_1832.pdf) and [Miao et al.'s (2014)](https://www.cs.cmu.edu/~ymiao/pub/tasl_sat.pdf) work. This approach not only greatly reduces computational requirements compared to training the full model, but also allows for better and more flexible speaker-specific adjustments. 
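To make the general idea more concrete, here is a schematic PyTorch sketch of a bottleneck adapter block in the spirit of the adapter-layers paper linked above. This is an illustrative toy module with assumed sizes, not MMS's actual adapter implementation (which, as described above, consists of small linear projections per attention block plus a language-specific output layer):

```python
import torch
import torch.nn as nn

class BottleneckAdapter(nn.Module):
    """A toy adapter block: down-project, non-linearity, up-project, residual connection."""

    def __init__(self, hidden_size: int, bottleneck_size: int = 64):
        super().__init__()
        self.down = nn.Linear(hidden_size, bottleneck_size)
        self.up = nn.Linear(bottleneck_size, hidden_size)
        self.act = nn.GELU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Only these few weights are trained; the frozen base model's output
        # flows through unchanged via the residual connection.
        return hidden_states + self.up(self.act(self.down(hidden_states)))

# Inserted between the frozen layers of a pre-trained model, each adapter adds
# only a tiny number of trainable parameters compared to the full network.
adapter = BottleneckAdapter(hidden_size=1280)
print(sum(p.numel() for p in adapter.parameters()))
```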
The work done in **MMS** leverages this idea of adapters for speech recognition across different languages. A small number of adapter weights are fine-tuned to grasp unique phonetic and grammatical traits of each target language. Thereby, MMS enables a single large base model (*e.g.*, the [**`mms-1b-all`**](https://huggingface.co/facebook/mms-1b-all) checkpoint) and 1000+ small adapter layers (2.5M weights each for **`mms-1b-all`**) to comprehend and transcribe multiple languages. This dramatically reduces the computational demand of developing distinct models for each language. Great! Now that we understood the motivation and theory, let's look into fine-tuning adapter weights for **`mms-1b-all`** 🔥 ## Notebook Setup As done previously in the ["Fine-tuning XLS-R on Multi-Lingual ASR"](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) blog post, we fine-tune the model on the low resource ASR dataset of [Common Voice](https://huggingface.co/datasets/common_voice) that contains only *ca.* 4h of validated training data. Just like Wav2Vec2 or XLS-R, MMS is fine-tuned using Connectionist Temporal Classification (CTC), which is an algorithm that is used to train neural networks for sequence-to-sequence problems, such as ASR and handwriting recognition. For more details on the CTC algorithm, I highly recommend reading the well-written blog post [*Sequence Modeling with CTC (2017)*](https://distill.pub/2017/ctc/) by Awni Hannun. Before we start, let's install `datasets` and `transformers`. Also, we need `torchaudio` to load audio files and `jiwer` to evaluate our fine-tuned model using the [word error rate (WER)](https://huggingface.co/metrics/wer) metric \\( {}^1 \\). ```bash %%capture !pip install --upgrade pip !pip install datasets[audio] !pip install evaluate !pip install git+https://github.com/huggingface/transformers.git !pip install jiwer !pip install accelerate ``` We strongly suggest to upload your training checkpoints directly to the [🤗 Hub](https://huggingface.co/) while training. The Hub repositories have version control built in, so you can be sure that no model checkpoint is lost during training. To do so you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) ```python from huggingface_hub import notebook_login notebook_login() ``` ## Prepare Data, Tokenizer, Feature Extractor ASR models transcribe speech to text, which means that we both need a feature extractor that processes the speech signal to the model's input format, *e.g.* a feature vector, and a tokenizer that processes the model's output format to text. In 🤗 Transformers, the MMS model is thus accompanied by both a feature extractor, called [Wav2Vec2FeatureExtractor](https://huggingface.co/transformers/master/model_doc/wav2vec2.html#wav2vec2featureextractor), and a tokenizer, called [Wav2Vec2CTCTokenizer](https://huggingface.co/transformers/master/model_doc/wav2vec2.html#wav2vec2ctctokenizer). Let's start by creating the tokenizer to decode the predicted output classes to the output transcription. ### Create `Wav2Vec2CTCTokenizer` Fine-tuned MMS models, such as [**`mms-1b-all`**](https://huggingface.co/facebook/mms-1b-all) already have a [tokenizer](https://huggingface.co/facebook/mms-1b-all/blob/main/tokenizer_config.json) accompanying the model checkpoint. 
However since we want to fine-tune the model on specific low-resource data of a certain language, it is recommended to fully remove the tokenizer and vocabulary output layer, and simply create new ones based on the training data itself. Wav2Vec2-like models fine-tuned on CTC transcribe an audio file with a single forward pass by first processing the audio input into a sequence of processed context representations and then using the final vocabulary output layer to classify each context representation to a character that represents the transcription. The output size of this layer corresponds to the number of tokens in the vocabulary, which we will extract from the labeled dataset used for fine-tuning. So in the first step, we will take a look at the chosen dataset of Common Voice and define a vocabulary based on the transcriptions. For this notebook, we will use [Common Voice's 6.1 dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_6_1) for Turkish. Turkish corresponds to the language code `"tr"`. Great, now we can use 🤗 Datasets' simple API to download the data. The dataset name is `"mozilla-foundation/common_voice_6_1"`, the configuration name corresponds to the language code, which is `"tr"` in our case. **Note**: Before being able to download the dataset, you have to access it by logging into your Hugging Face account, going on the [dataset repo page](https://huggingface.co/datasets/mozilla-foundation/common_voice_6_1) and clicking on "Agree and Access repository" Common Voice has many different splits including `invalidated`, which refers to data that was not rated as "clean enough" to be considered useful. In this notebook, we will only make use of the splits `"train"`, `"validation"` and `"test"`. Because the Turkish dataset is so small, we will merge both the validation and training data into a training dataset and only use the test data for validation. ```python from datasets import load_dataset, load_metric, Audio common_voice_train = load_dataset("mozilla-foundation/common_voice_6_1", "tr", split="train+validation", use_auth_token=True) common_voice_test = load_dataset("mozilla-foundation/common_voice_6_1", "tr", split="test", use_auth_token=True) ``` Many ASR datasets only provide the target text (`'sentence'`) for each audio array (`'audio'`) and file (`'path'`). Common Voice actually provides much more information about each audio file, such as the `'accent'`, etc. Keeping the notebook as general as possible, we only consider the transcribed text for fine-tuning. ```python common_voice_train = common_voice_train.remove_columns(["accent", "age", "client_id", "down_votes", "gender", "locale", "segment", "up_votes"]) common_voice_test = common_voice_test.remove_columns(["accent", "age", "client_id", "down_votes", "gender", "locale", "segment", "up_votes"]) ``` Let's write a short function to display some random samples of the dataset and run it a couple of times to get a feeling for the transcriptions. ```python from datasets import ClassLabel import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." 
picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) display(HTML(df.to_html())) ``` ```python show_random_elements(common_voice_train.remove_columns(["path", "audio"]), num_examples=10) ``` ```bash Oylar teker teker elle sayılacak. Son olaylar endişe seviyesini yükseltti. Tek bir kart hepsinin kapılarını açıyor. Blogcular da tam bundan bahsetmek istiyor. Bu Aralık iki bin onda oldu. Fiyatın altmış altı milyon avro olduğu bildirildi. Ardından da silahlı çatışmalar çıktı. "Romanya'da kurumlar gelir vergisi oranı yüzde on altı." Bu konuda neden bu kadar az şey söylendiğini açıklayabilir misiniz? ``` Alright! The transcriptions look fairly clean. Having translated the transcribed sentences, it seems that the language corresponds more to written-out text than noisy dialogue. This makes sense considering that [Common Voice](https://huggingface.co/datasets/common_voice) is a crowd-sourced read speech corpus. We can see that the transcriptions contain some special characters, such as `,.?!;:`. Without a language model, it is much harder to classify speech chunks to such special characters because they don't really correspond to a characteristic sound unit. *E.g.*, the letter `"s"` has a more or less clear sound, whereas the special character `"."` does not. Also in order to understand the meaning of a speech signal, it is usually not necessary to include special characters in the transcription. Let's simply remove all characters that don't contribute to the meaning of a word and cannot really be represented by an acoustic sound and normalize the text. ```python import re chars_to_remove_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\']' def remove_special_characters(batch): batch["sentence"] = re.sub(chars_to_remove_regex, '', batch["sentence"]).lower() return batch ``` ```python common_voice_train = common_voice_train.map(remove_special_characters) common_voice_test = common_voice_test.map(remove_special_characters) ``` Let's look at the processed text labels again. ```python show_random_elements(common_voice_train.remove_columns(["path","audio"])) ``` ```bash i̇kinci tur müzakereler eylül ayında başlayacak jani ve babası bu düşüncelerinde yalnız değil onurun gözlerindeki büyü bandiç oyların yüzde kırk sekiz virgül elli dördünü topladı bu imkansız bu konu açık değildir cinayet kamuoyunu şiddetle sarstı kentin sokakları iki metre su altında kaldı muhalefet partileri hükümete karşı ciddi bir mücadele ortaya koyabiliyorlar mı festivale tüm dünyadan elli film katılıyor ``` Good! This looks better. We have removed most special characters from transcriptions and normalized them to lower-case only. Before finalizing the pre-processing, it is always advantageous to consult a native speaker of the target language to see whether the text can be further simplified. For this blog post, [Merve](https://twitter.com/mervenoyann) was kind enough to take a quick look and noted that "hatted" characters - like `â` - aren't really used anymore in Turkish and can be replaced by their "un-hatted" equivalent, *e.g.* `a`. This means that we should replace a sentence like `"yargı sistemi hâlâ sağlıksız"` to `"yargı sistemi hala sağlıksız"`. Let's write another short mapping function to further simplify the text labels. Remember - the simpler the text labels, the easier it is for the model to learn to predict those labels. 
```python def replace_hatted_characters(batch): batch["sentence"] = re.sub('[â]', 'a', batch["sentence"]) batch["sentence"] = re.sub('[î]', 'i', batch["sentence"]) batch["sentence"] = re.sub('[ô]', 'o', batch["sentence"]) batch["sentence"] = re.sub('[û]', 'u', batch["sentence"]) return batch ``` ```python common_voice_train = common_voice_train.map(replace_hatted_characters) common_voice_test = common_voice_test.map(replace_hatted_characters) ``` In CTC, it is common to classify speech chunks into letters, so we will do the same here. Let's extract all distinct letters of the training and test data and build our vocabulary from this set of letters. We write a mapping function that concatenates all transcriptions into one long transcription and then transforms the string into a set of chars. It is important to pass the argument `batched=True` to the `map(...)` function so that the mapping function has access to all transcriptions at once. ```python def extract_all_chars(batch): all_text = " ".join(batch["sentence"]) vocab = list(set(all_text)) return {"vocab": [vocab], "all_text": [all_text]} ``` ```python vocab_train = common_voice_train.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_train.column_names) vocab_test = common_voice_test.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=common_voice_test.column_names) ``` Now, we create the union of all distinct letters in the training dataset and test dataset and convert the resulting list into an enumerated dictionary. ```python vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0])) ``` ```python vocab_dict = {v: k for k, v in enumerate(sorted(vocab_list))} vocab_dict ``` ```bash {' ': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, 'l': 12, 'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22, 'w': 23, 'x': 24, 'y': 25, 'z': 26, 'ç': 27, 'ë': 28, 'ö': 29, 'ü': 30, 'ğ': 31, 'ı': 32, 'ş': 33, '̇': 34} ``` Cool, we see that all letters of the alphabet occur in the dataset (which is not really surprising) and we also extracted the special characters `""` and `'`. Note that we did not exclude those special characters because the model has to learn to predict when a word is finished, otherwise predictions would always be a sequence of letters that would make it impossible to separate words from each other. One should always keep in mind that pre-processing is a very important step before training your model. E.g., we don't want our model to differentiate between `a` and `A` just because we forgot to normalize the data. The difference between `a` and `A` does not depend on the "sound" of the letter at all, but more on grammatical rules - *e.g.* use a capitalized letter at the beginning of the sentence. So it is sensible to remove the difference between capitalized and non-capitalized letters so that the model has an easier time learning to transcribe speech. To make it clearer that `" "` has its own token class, we give it a more visible character `|`. In addition, we also add an "unknown" token so that the model can later deal with characters not encountered in Common Voice's training set. ```python vocab_dict["|"] = vocab_dict[" "] del vocab_dict[" "] ``` Finally, we also add a padding token that corresponds to CTC's "*blank token*". The "blank token" is a core component of the CTC algorithm. 
For more information, please take a look at the "Alignment" section [here](https://distill.pub/2017/ctc/). ```python vocab_dict["[UNK]"] = len(vocab_dict) vocab_dict["[PAD]"] = len(vocab_dict) len(vocab_dict) ``` ```bash 37 ``` Cool, now our vocabulary is complete and consists of 37 tokens, which means that the linear layer that we will add on top of the pretrained MMS checkpoint as part of the adapter weights will have an output dimension of 37. Since a single MMS checkpoint can provide customized weights for multiple languages, the tokenizer can also consist of multiple vocabularies. Therefore, we need to nest our `vocab_dict` to potentially add more languages to the vocabulary in the future. The dictionary should be nested with the name that is used for the adapter weights and that is saved in the tokenizer config under the name [`target_lang`](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2CTCTokenizer.target_lang). Let's use the ISO-639-3 language codes like the original [**`mms-1b-all`**](https://huggingface.co/facebook/mms-1b-all) checkpoint. ```python target_lang = "tur" ``` Let's define an empty dictionary to which we can append the just created vocabulary ```python new_vocab_dict = {target_lang: vocab_dict} ``` **Note**: In case you want to use this notebook to add a new adapter layer to *an existing model repo* make sure to **not** create an empty, new vocab dict, but instead re-use one that already exists. To do so you should uncomment the following cells and replace `"patrickvonplaten/wav2vec2-large-mms-1b-turkish-colab"` with a model repo id to which you want to add your adapter weights. ```python # from transformers import Wav2Vec2CTCTokenizer # mms_adapter_repo = "patrickvonplaten/wav2vec2-large-mms-1b-turkish-colab" # make sure to replace this path with a repo to which you want to add your new adapter weights # tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(mms_adapter_repo) # new_vocab = tokenizer.vocab # new_vocab[target_lang] = vocab_dict ``` Let's now save the vocabulary as a json file. ```python import json with open('vocab.json', 'w') as vocab_file: json.dump(new_vocab_dict, vocab_file) ``` In a final step, we use the json file to load the vocabulary into an instance of the `Wav2Vec2CTCTokenizer` class. ```python from transformers import Wav2Vec2CTCTokenizer tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|", target_lang=target_lang) ``` If one wants to re-use the just created tokenizer with the fine-tuned model of this notebook, it is strongly advised to upload the `tokenizer` to the [🤗 Hub](https://huggingface.co/). Let's call the repo to which we will upload the files `"wav2vec2-large-mms-1b-turkish-colab"`: ```python repo_name = "wav2vec2-large-mms-1b-turkish-colab" ``` and upload the tokenizer to the [🤗 Hub](https://huggingface.co/). 
```python tokenizer.push_to_hub(repo_name) ``` ```bash CommitInfo(commit_url='https://huggingface.co/patrickvonplaten/wav2vec2-large-mms-1b-turkish-colab/commit/48cccbfd6059aa6ce655e9d94b8358ba39536cb7', commit_message='Upload tokenizer', commit_description='', oid='48cccbfd6059aa6ce655e9d94b8358ba39536cb7', pr_url=None, pr_revision=None, pr_num=None) ``` Great, you can see the just created repository under `https://huggingface.co/<your-username>/wav2vec2-large-mms-1b-tr-colab` ### Create `Wav2Vec2FeatureExtractor` Speech is a continuous signal and to be treated by computers, it first has to be discretized, which is usually called **sampling**. The sampling rate hereby plays an important role in that it defines how many data points of the speech signal are measured per second. Therefore, sampling with a higher sampling rate results in a better approximation of the *real* speech signal but also necessitates more values per second. A pretrained checkpoint expects its input data to have been sampled more or less from the same distribution as the data it was trained on. The same speech signals sampled at two different rates have a very different distribution, *e.g.*, doubling the sampling rate results in twice as many data points. Thus, before fine-tuning a pretrained checkpoint of an ASR model, it is crucial to verify that the sampling rate of the data that was used to pretrain the model matches the sampling rate of the dataset used to fine-tune the model. A `Wav2Vec2FeatureExtractor` object requires the following parameters to be instantiated: - `feature_size`: Speech models take a sequence of feature vectors as an input. While the length of this sequence obviously varies, the feature size should not. In the case of Wav2Vec2, the feature size is 1 because the model was trained on the raw speech signal \\( {}^2 \\). - `sampling_rate`: The sampling rate at which the model is trained on. - `padding_value`: For batched inference, shorter inputs need to be padded with a specific value - `do_normalize`: Whether the input should be *zero-mean-unit-variance* normalized or not. Usually, speech models perform better when normalizing the input - `return_attention_mask`: Whether the model should make use of an `attention_mask` for batched inference. In general, XLS-R models checkpoints should **always** use the `attention_mask`. ```python from transformers import Wav2Vec2FeatureExtractor feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True) ``` Great, MMS's feature extraction pipeline is thereby fully defined! For improved user-friendliness, the feature extractor and tokenizer are *wrapped* into a single `Wav2Vec2Processor` class so that one only needs a `model` and `processor` object. ```python from transformers import Wav2Vec2Processor processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` Next, we can prepare the dataset. ### Preprocess Data So far, we have not looked at the actual values of the speech signal but just the transcription. In addition to `sentence`, our datasets include two more column names `path` and `audio`. `path` states the absolute path of the audio file and `audio` represent already loaded audio data. MMS expects the input in the format of a 1-dimensional array of 16 kHz. This means that the audio file has to be loaded and resampled. Thankfully, `datasets` does this automatically when the column name is `audio`. Let's try it out. 
```python common_voice_train[0]["audio"] ``` ```bash {'path': '/root/.cache/huggingface/datasets/downloads/extracted/71ba9bd154da9d8c769b736301417178729d2b87b9e00cda59f6450f742ed778/cv-corpus-6.1-2020-12-11/tr/clips/common_voice_tr_17346025.mp3', 'array': array([ 0.00000000e+00, -2.98378618e-13, -1.59835903e-13, ..., -2.01663317e-12, -1.87991593e-12, -1.17969588e-12]), 'sampling_rate': 48000} ``` In the example above we can see that the audio data is loaded with a sampling rate of 48kHz whereas the model expects 16kHz, as we saw. We can set the audio feature to the correct sampling rate by making use of [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=cast_column#datasets.DatasetDict.cast_column): ```python common_voice_train = common_voice_train.cast_column("audio", Audio(sampling_rate=16_000)) common_voice_test = common_voice_test.cast_column("audio", Audio(sampling_rate=16_000)) ``` Let's take a look at `"audio"` again. ```python common_voice_train[0]["audio"] ``` {'path': '/root/.cache/huggingface/datasets/downloads/extracted/71ba9bd154da9d8c769b736301417178729d2b87b9e00cda59f6450f742ed778/cv-corpus-6.1-2020-12-11/tr/clips/common_voice_tr_17346025.mp3', 'array': array([ 9.09494702e-13, -6.13908924e-12, -1.09139364e-11, ..., 1.81898940e-12, 4.54747351e-13, 3.63797881e-12]), 'sampling_rate': 16000} This seemed to have worked! Let's do a final check that the data is correctly prepared, by printing the shape of the speech input, its transcription, and the corresponding sampling rate. ```python rand_int = random.randint(0, len(common_voice_train)-1) print("Target text:", common_voice_train[rand_int]["sentence"]) print("Input array shape:", common_voice_train[rand_int]["audio"]["array"].shape) print("Sampling rate:", common_voice_train[rand_int]["audio"]["sampling_rate"]) ``` ```bash Target text: bağış anlaşması bir ağustosta imzalandı Input array shape: (70656,) Sampling rate: 16000 ``` Good! Everything looks fine - the data is a 1-dimensional array, the sampling rate always corresponds to 16kHz, and the target text is normalized. Finally, we can leverage `Wav2Vec2Processor` to process the data to the format expected by `Wav2Vec2ForCTC` for training. To do so let's make use of Dataset's [`map(...)`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=map#datasets.DatasetDict.map) function. First, we load and resample the audio data, simply by calling `batch["audio"]`. Second, we extract the `input_values` from the loaded audio file. In our case, the `Wav2Vec2Processor` only normalizes the data. For other speech models, however, this step can include more complex feature extraction, such as [Log-Mel feature extraction](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum). Third, we encode the transcriptions to label ids. **Note**: This mapping function is a good example of how the `Wav2Vec2Processor` class should be used. In "normal" context, calling `processor(...)` is redirected to `Wav2Vec2FeatureExtractor`'s call method. When wrapping the processor into the `as_target_processor` context, however, the same method is redirected to `Wav2Vec2CTCTokenizer`'s call method. For more information please check the [docs](https://huggingface.co/transformers/master/model_doc/wav2vec2.html#transformers.Wav2Vec2Processor.__call__). 
```python def prepare_dataset(batch): audio = batch["audio"] # batched output is "un-batched" batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0] batch["input_length"] = len(batch["input_values"]) batch["labels"] = processor(text=batch["sentence"]).input_ids return batch ``` Let's apply the data preparation function to all examples. ```python common_voice_train = common_voice_train.map(prepare_dataset, remove_columns=common_voice_train.column_names) common_voice_test = common_voice_test.map(prepare_dataset, remove_columns=common_voice_test.column_names) ``` **Note**: `datasets` automatically takes care of audio loading and resampling. If you wish to implement your own customized data loading/sampling, feel free to just make use of the `"path"` column instead and disregard the `"audio"` column. Awesome, now we are ready to start training! ## Training The data is processed so that we are ready to start setting up the training pipeline. We will make use of 🤗's [Trainer](https://huggingface.co/transformers/master/main_classes/trainer.html?highlight=trainer) for which we essentially need to do the following: - Define a data collator. In contrast to most NLP models, MMS has a much larger input length than output length. *E.g.*, a sample of input length 50000 has an output length of no more than 100. Given the large input sizes, it is much more efficient to pad the training batches dynamically, meaning that all training samples should only be padded to the longest sample in their batch and not the overall longest sample. Therefore, fine-tuning MMS requires a special padding data collator, which we will define below - Evaluation metric. During training, the model should be evaluated on the word error rate. We should define a `compute_metrics` function accordingly - Load a pretrained checkpoint. We need to load a pretrained checkpoint and configure it correctly for training. - Define the training configuration. After having fine-tuned the model, we will correctly evaluate it on the test data and verify that it has indeed learned to correctly transcribe speech. ### Set-up Trainer Let's start by defining the data collator. The code for the data collator was copied from [this example](https://github.com/huggingface/transformers/blob/7e61d56a45c19284cfda0cee8995fb552f6b1f4e/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L219). Without going into too many details, in contrast to the common data collators, this data collator treats the `input_values` and `labels` differently and thus applies two separate padding functions on them (again making use of MMS processor's context manager). This is necessary because, in speech recognition, input and output are of different modalities so they should not be treated by the same padding function. Analogous to the common data collators, the padding tokens in the labels are replaced with `-100` so that those tokens are **not** taken into account when computing the loss. ```python import torch from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union @dataclass class DataCollatorCTCWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for processing the data. 
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). """ processor: Wav2Vec2Processor padding: Union[bool, str] = True def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, return_tensors="pt", ) labels_batch = self.processor.pad( labels=label_features, padding=self.padding, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch ``` ```python data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) ``` Next, the evaluation metric is defined. As mentioned earlier, the predominant metric in ASR is the word error rate (WER), hence we will use it in this notebook as well. ```python from evaluate import load wer_metric = load("wer") ``` The model will return a sequence of logit vectors: \\( \mathbf{y}_1, \ldots, \mathbf{y}_m \\) with \\( \mathbf{y}_1 = f_{\theta}(x_1, \ldots, x_n)[0] \\) and \\( n >> m \\). A logit vector \\( \mathbf{y}_i \\) contains the log-odds for each word in the vocabulary we defined earlier, thus \\( \text{len}(\mathbf{y}_i) = \\) `config.vocab_size`. We are interested in the most likely prediction of the model and thus take the `argmax(...)` of the logits. Also, we transform the encoded labels back to the original string by replacing `-100` with the `pad_token_id` and decoding the ids while making sure that consecutive tokens are **not** grouped to the same token in CTC style \\( {}^1 \\). ```python import numpy as np def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} ``` Now, we can load the pretrained checkpoint of [`mms-1b-all`](https://huggingface.co/facebook/mms-1b-all). The tokenizer's `pad_token_id` must be used to define the model's `pad_token_id`, or in the case of `Wav2Vec2ForCTC`, also CTC's *blank token* \\( {}^2 \\). Since we're only training a small subset of weights, the model is not prone to overfitting. Therefore, we make sure to disable all dropout layers. **Note**: When using this notebook to train MMS on another language of Common Voice, those hyper-parameter settings might not work very well. 
Feel free to adapt those depending on your use case. ```python from transformers import Wav2Vec2ForCTC model = Wav2Vec2ForCTC.from_pretrained( "facebook/mms-1b-all", attention_dropout=0.0, hidden_dropout=0.0, feat_proj_dropout=0.0, layerdrop=0.0, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), ignore_mismatched_sizes=True, ) ``` ```bash Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/mms-1b-all and are newly initialized because the shapes did not match: - lm_head.bias: found shape torch.Size([154]) in the checkpoint and torch.Size([39]) in the model instantiated - lm_head.weight: found shape torch.Size([154, 1280]) in the checkpoint and torch.Size([39, 1280]) in the model instantiated You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. ``` **Note**: It is expected that some weights are newly initialized. Those weights correspond to the newly initialized vocabulary output layer. We now want to make sure that only the adapter weights will be trained and that the rest of the model stays frozen. First, we re-initialize all the adapter weights which can be done with the handy `init_adapter_layers` method. It is also possible to not re-initilize the adapter weights and continue fine-tuning, but in this case one should make sure to load fitting adapter weights via the [`load_adapter(...)` method](https://huggingface.co/docs/transformers/main/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC.load_adapter) before training. Often the vocabulary still will not match the custom training data very well though, so it's usually easier to just re-initialize all adapter layers so that they can be easily fine-tuned. ```python model.init_adapter_layers() ``` Next, we freeze all weights, **but** the adapter layers. ```python model.freeze_base_model() adapter_weights = model._get_adapters() for param in adapter_weights.values(): param.requires_grad = True ``` In a final step, we define all parameters related to training. To give more explanation on some of the parameters: - `group_by_length` makes training more efficient by grouping training samples of similar input length into one batch. This can significantly speed up training time by heavily reducing the overall number of useless padding tokens that are passed through the model - `learning_rate` was chosen to be 1e-3 which is a common default value for training with Adam. Other learning rates might work equally well. For more explanations on other parameters, one can take a look at the [docs](https://huggingface.co/transformers/master/main_classes/trainer.html?highlight=trainer#trainingarguments). To save GPU memory, we enable PyTorch's [gradient checkpointing](https://pytorch.org/docs/stable/checkpoint.html) and also set the loss reduction to "*mean*". MMS adapter fine-tuning converges extremely fast to very good performance, so even for a dataset as small as 4h we will only train for 4 epochs. During training, a checkpoint will be uploaded asynchronously to the hub every 200 training steps. It allows you to also play around with the demo widget even while your model is still training. **Note**: If one does not want to upload the model checkpoints to the hub, simply set `push_to_hub=False`. 
```python from transformers import TrainingArguments training_args = TrainingArguments( output_dir=repo_name, group_by_length=True, per_device_train_batch_size=32, evaluation_strategy="steps", num_train_epochs=4, gradient_checkpointing=True, fp16=True, save_steps=200, eval_steps=100, logging_steps=100, learning_rate=1e-3, warmup_steps=100, save_total_limit=2, push_to_hub=True, ) ``` Now, all instances can be passed to the Trainer and we are ready to start training! ```python from transformers import Trainer trainer = Trainer( model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=common_voice_train, eval_dataset=common_voice_test, tokenizer=processor.feature_extractor, ) ``` ------------------------------------------------------------------------ \\( {}^1 \\) To allow models to become independent of the speaker rate, in CTC, consecutive tokens that are identical are simply grouped as a single token. However, the encoded labels should not be grouped when decoding since they don't correspond to the predicted tokens of the model, which is why the `group_tokens=False` parameter has to be passed. If we didn't pass this parameter, a word like `"hello"` would incorrectly be decoded as `"helo"`. \\( {}^2 \\) The blank token allows the model to predict a word, such as `"hello"`, by forcing it to insert the blank token between the two l's. A CTC-conform prediction of `"hello"` of our model would be `[PAD] [PAD] "h" "e" "e" "l" "l" [PAD] "l" "o" "o" [PAD]`. ### Training Training should take less than 30 minutes depending on the GPU used. ```python trainer.train() ``` | Training Loss | Training Steps | Validation Loss | Wer | |:-------------:|:----:|:---------------:|:------:| | 4.905 | 100 | 0.215 | 0.280 | | 0.290 | 200 | 0.167 | 0.232 | | 0.2659 | 300 | 0.161 | 0.229 | | 0.2398 | 400 | 0.156 | 0.223 | The training loss and validation WER go down nicely. We see that fine-tuning the adapter layers of `mms-1b-all` for just 100 steps already outperforms fine-tuning the whole `xls-r-300m` checkpoint shown [here](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2#training-1) by a large margin. From the [official paper](https://scontent-cdg4-3.xx.fbcdn.net/v/t39.8562-6/348827959_6967534189927933_6819186233244071998_n.pdf?_nc_cat=104&ccb=1-7&_nc_sid=ad8a9d&_nc_ohc=fSo3qQ7uxr0AX8EWnWl&_nc_ht=scontent-cdg4-3.xx&oh=00_AfBL34K0MAAPb0CgnthjbHfiB6pSnnwbn5esj9DZVPvyoA&oe=6495E802) and this quick comparison it becomes clear that `mms-1b-all` has a much higher capability of transferring knowledge to a low-resource language and should be preferred over `xls-r-300m`. In addition, training is also more memory-efficient as only a small subset of layers is trained. The adapter weights will be uploaded as part of the model checkpoint, but we also want to make sure to save them separately so that they can easily be off- and onloaded. Let's save all the adapter layers into the training output dir so that they can be correctly uploaded to the Hub. ```python from safetensors.torch import save_file as safe_save_file from transformers.models.wav2vec2.modeling_wav2vec2 import WAV2VEC2_ADAPTER_SAFE_FILE import os adapter_file = WAV2VEC2_ADAPTER_SAFE_FILE.format(target_lang) adapter_file = os.path.join(training_args.output_dir, adapter_file) safe_save_file(model._get_adapters(), adapter_file, metadata={"format": "pt"}) ```
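Out of curiosity, we can also check how small this adapter file actually is (an optional sketch; the exact size depends on the vocabulary and adapter dimensions):

```python
# The adapter checkpoint holds only a few million parameters, a tiny fraction
# of the ~1B-parameter base model that stays untouched.
print(f"{adapter_file}: {os.path.getsize(adapter_file) / 1e6:.1f} MB")
```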
Finally, you can upload the result of the training to the 🤗 Hub. ```python trainer.push_to_hub() ``` One of the main advantages of training adapter weights is that the "base" model, which makes up roughly 99% of the model weights, is kept unchanged and only a small [2.5M adapter checkpoint](https://huggingface.co/patrickvonplaten/wav2vec2-large-mms-1b-turkish-colab/blob/main/adapter.tur.safetensors) has to be shared in order to use the trained checkpoint. This makes it extremely simple to train additional adapter layers and add them to your repository. You can do so very easily by simply re-running this script and changing the language you would like to train on to a different one, *e.g.* `swe` for Swedish. In addition, you should make sure that the vocabulary does not get completely overwritten but that the new language vocabulary is **appended** to the existing one, as stated above in the commented-out cells. To demonstrate how different adapter layers can be loaded, I have also trained and uploaded an adapter layer for Swedish under the ISO language code `swe`, as you can see [here](https://huggingface.co/patrickvonplaten/wav2vec2-large-mms-1b-turkish-colab/blob/main/adapter.swe.safetensors). You can load the fine-tuned checkpoint as usual by using `from_pretrained(...)`, but you should make sure to also pass `target_lang="<your-lang-code>"` to the method so that the correct adapter is loaded. You should also set the target language correctly for your tokenizer. Let's see how we can load the Turkish checkpoint first. ```python model_id = "patrickvonplaten/wav2vec2-large-mms-1b-turkish-colab" model = Wav2Vec2ForCTC.from_pretrained(model_id, target_lang="tur").to("cuda") processor = Wav2Vec2Processor.from_pretrained(model_id) processor.tokenizer.set_target_lang("tur") ``` Let's check that the model can correctly transcribe Turkish. ```python from datasets import Audio common_voice_test_tr = load_dataset("mozilla-foundation/common_voice_6_1", "tr", data_dir="./cv-corpus-6.1-2020-12-11", split="test", use_auth_token=True) common_voice_test_tr = common_voice_test_tr.cast_column("audio", Audio(sampling_rate=16_000)) ``` Let's process the audio, run a forward pass and predict the ids. ```python input_dict = processor(common_voice_test_tr[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt", padding=True) logits = model(input_dict.input_values.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1)[0] ``` Finally, we can decode the example. ```python print("Prediction:") print(processor.decode(pred_ids)) print("\nReference:") print(common_voice_test_tr[0]["sentence"].lower()) ``` **Output**: ```bash Prediction: pekçoğuda roman toplumundan geliyor Reference: pek çoğu da roman toplumundan geliyor. ``` This looks almost exactly right; only two spaces are missing in the first word.
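To see the CTC behaviour described in the footnotes above in action, we can peek at the raw, frame-level prediction before `processor.decode(...)` collapses it (a small optional sketch, not part of the original post):

```python
# The raw per-frame argmax still contains repeated characters and [PAD] (the blank
# token); processor.decode(...) merges the repeats and strips the blanks for us.
raw_tokens = processor.tokenizer.convert_ids_to_tokens(pred_ids[:40].tolist())
print(raw_tokens)
```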
Now it is very simple to change the adapter to Swedish by calling [`model.load_adapter(...)`](https://huggingface.co/docs/transformers/main/en/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC.load_adapter) and by changing the tokenizer to Swedish as well. ```python model.load_adapter("swe") processor.tokenizer.set_target_lang("swe") ``` We again load the Swedish test set from Common Voice: ```python common_voice_test_swe = load_dataset("mozilla-foundation/common_voice_6_1", "sv-SE", data_dir="./cv-corpus-6.1-2020-12-11", split="test", use_auth_token=True) common_voice_test_swe = common_voice_test_swe.cast_column("audio", Audio(sampling_rate=16_000)) ``` and transcribe a sample: ```python input_dict = processor(common_voice_test_swe[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt", padding=True) logits = model(input_dict.input_values.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1)[0] print("Prediction:") print(processor.decode(pred_ids)) print("\nReference:") print(common_voice_test_swe[0]["sentence"].lower()) ``` **Output**: ```bash Prediction: jag lämnade grovjobbet åt honom Reference: jag lämnade grovjobbet åt honom. ``` Great, this looks like a perfect transcription! We've shown in this blog post how MMS adapter weights fine-tuning not only gives state-of-the-art performance on low-resource languages, but also significantly speeds up training time and allows you to easily build a collection of customized adapter weights. *Related posts and additional links are listed here:* - [**Official paper**](https://huggingface.co/papers/2305.13516) - [**Original codebase**](https://github.com/facebookresearch/fairseq/tree/main/examples/mms/asr) - [**Official demo**](https://huggingface.co/spaces/facebook/MMS) - [**Transformers Docs**](https://huggingface.co/docs/transformers/index) - [**Related XLS-R blog post**](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) - [**Models on the Hub**](https://huggingface.co/models?other=mms)
8
0
hf_public_repos
hf_public_repos/blog/protectai.md
--- title: "Hugging Face Teams Up with Protect AI: Enhancing Model Security for the ML Community" thumbnail: /blog/assets/protectai/thumbnail.png authors: - user: mcpotato --- We are pleased to announce our partnership with Protect AI, as part of our [long-standing commitment](https://huggingface.co/blog/2024-security-features) to provide a safe and reliable platform for the ML community. [Protect AI](https://protectai.com/) is a company founded with a mission to create a safer AI-powered world. They are developing powerful tools, namely [Guardian](https://protectai.com/guardian), to ensure that the rapid pace of AI innovation can continue without compromising security. Our decision to partner with Protect AI stems from their [community driven](https://huntr.com/) approach to security, active support of [open source](https://github.com/protectai), and expertise in all things security x AI. > [!TIP] > Interested in joining our security partnership / providing scanning information on the Hub? Please get in touch with us over at [email protected]. ## Model security refresher To share models, we serialize weights, configs and other data structures we use to interact with the models, in order to facilitate storage and transport. Some serialization formats are vulnerable to nasty exploits, such as arbitrary code execution (looking at you pickle), making shared models that use those formats potentially dangerous. As Hugging Face has become a popular platform for model sharing, we’d like to help protect the community from this, hence why we have developed tools like [picklescan](https://github.com/mmaitre314/picklescan) and why we are integrating Guardian in our scanner suite. Pickle is not the only exploitable format out there, [see for reference](https://github.com/Azure/counterfit/wiki/Abusing-ML-model-file-formats-to-create-malware-on-AI-systems:-A-proof-of-concept) how one can exploit Keras Lambda layers to achieve arbitrary code execution. The good news is that Guardian catches both of these exploits and more in additional file formats – see their [Knowledge Base](https://protectai.com/insights/knowledge-base/) for up to date scanner information. > [!TIP] > Read all our documentation on security here: https://huggingface.co/docs/hub/security 🔥 ## Integration While integrating Guardian as a third-party scanner, we have used this as an opportunity to revamp our frontend to display scan results. Here is what it now looks like: <img class="block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/third-party-scans-list.png"/> <img class="block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/security-scanner-status-banner.png"/> <img class="block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/security-scanner-pickle-import-list.png"/> <em>As you can see here, an additional Pickle button is present when a pickle import scan occurred</em> As you can see from the screenshot, there's nothing you have to do to benefit from this! All public model repositories will be scanned by Guardian automatically as soon as you push your files to the Hub. Here is an example repository you can check out to see the feature in action: [mcpotato/42-eicar-street](https://huggingface.co/mcpotato/42-eicar-street). Note that you might not see a scan for your model as of today, as we have over 1 million model repos. It may take us some time to catch up 😅. 
In total, we have already scanned hundreds of millions of files. We do this because we believe that empowering the community to share models in a safe and frictionless manner will lead to growth for the whole field.
9
0
hf_public_repos/blog
hf_public_repos/blog/zh/falcon-180b.md
--- title: "Falcon 180B 登陆 Hugging Face Hub 🔥" thumbnail: /blog/assets/162_falcon_180b/thumbnail.jpg authors: - user: philschmid - user: osanseviero - user: pcuenq - user: lvwerra - user: slippylolo --- # Falcon 180B 登陆 Hugging Face Hub 🔥 ## 引言 **我们很高兴地宣布由 Technology Innovation Institute (TII) 训练的开源大模型 Falcon 180B 登陆 Hugging Face!** Falcon 180B 为开源大模型树立了全新的标杆。作为当前最大的开源大模型,有180B 参数并且是在在 3.5 万亿 token 的 TII [RefinedWeb](https://hf.co/datasets/tiiuae/falcon-refinedweb) 数据集上进行训练,这也是目前开源模型里最长的单波段预训练。 你可以在 Hugging Face Hub 中查阅其 [基础模型](https://hf.co/tiiuae/falcon-180B)、[聊天模型](https://hf.co/tiiuae/falcon-180B-chat),以及其 [Space 应用](https://hf.co/spaces/tiiuae/falcon-180b-demo)。 从表现能力上来看,Falcon 180B 在自然语言任务上的表现十分优秀。它在开源模型排行榜 (预训练) 上名列前茅,并可与 PaLM-2 等专有模型相差无几。虽然目前还很难给出明确的排名,但它被认为与 PaLM-2 Large 不相上下,这也使得它成为目前公开的能力最强的 LLM 之一。 我们将在本篇博客中通过评测结果来探讨 Falcon 180B 的优势所在,并展示如何在自己的硬件上使用该模型。 ## Falcon 180B 是什么? 从架构维度来看,Falcon 180B 是 Falcon 40B 的升级版本,并在其基础上进行了创新,比如利用 Multi-Query Attention 等来提高模型的可扩展性。可以通过回顾 Falcon 40B 的博客 [Falcon 40B](https://hf.co/blog/zh/falcon) 来了解其架构。Falcon 180B 是使用 Amazon SageMaker 在多达 4096 个 GPU 上同时对 3.5 万亿个 token 进行训练,总共花费了约 7,000,000 个 GPU 计算时,这意味着 Falcon 180B 的规模是 Llama 2 的 2.5 倍,而训练所需的计算量是 Llama 2 的 4 倍。 其训练数据主要来自 RefinedWeb 数据集 (大约占 85%),此外,它还在对话、技术论文和一小部分代码 (约占 3%) 等经过整理的混合数据的基础上进行了训练。这个预训练数据集足够大,即使是 3.5 万亿个标记也只占不到一个时期 (epoch)。 已发布的 [聊天模型](https://hf.co/tiiuae/falcon-180B-chat) 在对话和指令数据集上进行了微调,混合了 [Open-Platypus](https://hf.co/datasets/garage-bAInd/Open-Platypus)、[UltraChat](https://hf.co/datasets/stingning/ultrachat) 和 [Airoboros](https://hf.co/datasets/jondurbin/airoboros-2.1) 数据集。 ‼️ 商业用途: Falcon 180b 可用于商业用途,但条件非常严格,不包括任何“托管用途”。如果您有兴趣将其用于商业用途,我们建议您查看 [许可证](https://hf.co/spaces/tiiuae/falcon-180b-license/blob/main/LICENSE.txt) 并咨询您的法律团队。 ## Falcon 180B 的优势是什么? Falcon 180B 是当前最好的开源大模型。在 MMLU上 的表现超过了 Llama 2 70B 和 OpenAI 的 GPT-3.5。在 HellaSwag、LAMBADA、WebQuestions、Winogrande、PIQA、ARC、BoolQ、CB、COPA、RTE、WiC、WSC 及 ReCoRD 上与谷歌的 PaLM 2-Large 不相上下。 ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/162_falcon_180b/palm2_480.jpg) 它在 Hugging Face 开源大模型榜单上以 68.74 的成绩被认为是当前评分最高的开放式大模型,评分超过了 Meta 的 LlaMA 2 (67.35)。 | Model | Size | Leaderboard score | Commercial use or license | Pretraining length | | ------- | ---- | ----------------- | ------------------------- | ------------------ | | Falcon | 180B | 68.74 | 🟠 | 3,500B | | Llama 2 | 70B | 67.35 | 🟠 | 2,000B | | LLaMA | 65B | 64.23 | 🔴 | 1,400B | | Falcon | 40B | 61.48 | 🟢 | 1,000B | | MPT | 30B | 56.15 | 🟢 | 1,000B | ![open_llm_leaderboard.jpg](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/162_falcon_180b/open_llm_leaderboard.jpg) ## 如何使用 Falcon 180B? 
从 Transfomers 4.33 开始,Falcon 180B 可以在 Hugging Face 生态中使用和下载。 ### Demo 你可以在 [这个 Hugging Face Space](https://hf.co/spaces/HuggingFaceH4/falcon-chat) 或以下场景中体验 Falcon 180B 的 demo。 <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.42.0/gradio.js"> </script> <gradio-app theme_mode="light" space="tiiuae/falcon-180b-chat"></gradio-app> ### 硬件要求 | | 类型 | 种类 | 最低要求 | 配置示例 | | ----------- | --------- | ---------------- | ------------------- | --------------- | | Falcon 180B | Training | Full fine-tuning | 5120GB | 8x 8x A100 80GB | | Falcon 180B | Training | LoRA with ZeRO-3 | 1280GB | 2x 8x A100 80GB | | Falcon 180B | Training | QLoRA | 160GB | 2x A100 80GB | | Falcon 180B | Inference | BF16/FP16 | 640GB | 8x A100 80GB | | Falcon 180B | Inference | GPTQ/int4 | 320GB | 8x A100 40GB | ### Prompt 格式 其基础模型没有 Prompt 格式,因为它并不是一个对话型大模型也不是通过指令进行的训练,所以它并不会以对话形式回应。预训练模型是微调的绝佳平台,但或许你不该直接使用。其对话模型则设有一个简单的对话模式。 ```bash System: Add an optional system prompt here User: This is the user input Falcon: This is what the model generates User: This might be a second turn input Falcon: and so on ``` ### Transformers 随着 Transfomers 4.33 发布,你可以在 Hugging Face 上使用 Falcon 180B 并且借助 HF 生态里的所有工具,比如: 训练和推理脚本及示例 安全文件格式 (safetensor) 与 bitsandbytes (4 位量化)、PEFT (参数高效微调) 和 GPTQ 等工具集成 辅助生成 (也称为“推测解码”) RoPE 扩展支持更大的上下文长度 丰富而强大的生成参数 在使用这个模型之前,你需要接受它的许可证和使用条款。请确保你已经登录了自己的 Hugging Face 账号,并安装了最新版本的 transformers: ```bash pip install --upgrade transformers huggingface-cli login ``` **bfloat16** 以下是如何在 `bfloat16` 中使用基础模型的方法。Falcon 180B 是一个大型模型,所以请注意它的硬件要求。 ```python from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model_id = "tiiuae/falcon-180B" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map="auto", ) prompt = "My name is Pedro, I live in" inputs = tokenizer(prompt, return_tensors="pt").to("cuda") output = model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], do_sample=True, temperature=0.6, top_p=0.9, max_new_tokens=50, ) output = output[0].to("cpu") print(tokenizer.decode(output) ``` 这可能会产生如下输出结果: ``` My name is Pedro, I live in Portugal and I am 25 years old. I am a graphic designer, but I am also passionate about photography and video. I love to travel and I am always looking for new adventures. I love to meet new people and explore new places. 
``` **使用 8 位和 4 位的 bitsandbytes** Falcon 180B 的 8 位和 4 位量化版本在评估方面与 `bfloat16` 几乎没有差别!这对推理来说是个好消息,因为你可以放心地使用量化版本来降低硬件要求。请记住,在 8 位版本进行推理要比 4 位版本快得多。 要使用量化,你需要安装“bitsandbytes”库,并在加载模型时启用相应的标志: ```python model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, **load_in_8bit=True,** device_map="auto", ) ``` **对话模型** 如上所述,为跟踪对话而微调的模型版本使用了非常直接的训练模板。我们必须遵循同样的模式才能运行聊天式推理。作为参考,你可以看看聊天演示中的 `[format_prompt](https://hf.co/spaces/hf-extreme-scale/falcon-180b-chat-demo/blob/main/app.py#L19)` 函数: ```python def format_prompt(message, history, system_prompt): prompt = "" if system_prompt: prompt += f"System: {system_prompt}\n" for user_prompt, bot_response in history: prompt += f"User: {user_prompt}\n" prompt += f"Falcon: {bot_response}\n" prompt += f"User: {message}\nFalcon:" return prompt ``` 如你所见,用户的交互和模型的回应前面都有 `User:` 和 `Falcon:` 分隔符。我们将它们连接在一起,形成一个包含整个对话历史的提示。我们可以提供一个系统提示来调整生成风格。 ## 其他资源 - [模型页面](https://hf.co/models?other=falcon&sort=trending&search=180) - [Space 应用](https://hf.co/spaces/tiiuae/falcon-180b-chat) - [Falcon 180B 已登陆 Hugging Face 生态系统](https://hf.co/blog/zh/falcon-180b) (本文) - [官方公告](https://falconllm.tii.ae/falcon-models.html) ## 致谢 在我们的生态中发布并持续支持与评估这样一个模型离不开众多社区成员的贡献,这其中包括 Clémentine 和 Eleuther Evaluation Harness 对 LLM 的评估; Loubna 与 BigCode 对代码的评估; Nicolas 对推理方面的支持; Lysandre、Matt、Daniel、Amy、Joao 和 Arthur 将 Falcon 集成到 transformers 中。感谢 Baptiste 和 Patrick 编写开源示例。感谢 Thom、Lewis、TheBloke、Nouamane 和 Tim Dettmers 鼎力贡献让这些能发布。最后,感谢 HF Cluster 为运行 LLM 推理和一个开源免费的模型 demo 提供的大力支持。
0
0
hf_public_repos/blog
hf_public_repos/blog/zh/pycharm-integration.md
--- title: "Hugging Face 与 PyCharm 深度集成:轻松引入丰富的 AI 模型" thumbnail: /blog/assets/pycharm-integration/thumbnail.png authors: - user: rocketknight1 translators: - user: chenglu --- # Hugging Face 与 PyCharm 深度集成:轻松引入丰富的 AI 模型 这是一个平平无奇的星期二早晨,作为一名 Transformers 库的维护者,我照例做着每天工作日早上都要做的事情:打开 [PyCharm](https://jb.gg/get-pycharm-hf),加载 Transformers 代码库,充满感情地浏览 [聊天模板文档](https://huggingface.co/docs/transformers/main/chat_templating),并努力“无视”当天有 50 个用户问题在等我处理。但今天有些不一样的地方: ![screenshot 0](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pycharm-integration/screenshot_0.png) 有什么……等等!我们放大看看! ![screenshot 1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pycharm-integration/screenshot_1.png) 那是……? ![screenshot 2](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pycharm-integration/screenshot_2.png) 给今天提出 issue 的用户说声抱歉,你们的问题显然可能不会得到回复了。因为我要聊聊聊聊 PyCharm 中的 Hugging Face 集成! ## Hugging Face 就在你最熟悉的地方 我可以通过列出功能来介绍这个集成,但那样未免有些乏味,况且我们还有 [文档](https://www.jetbrains.com/help/pycharm/hugging-face.html)。不如让我们通过实际操作来看看它的用法。假设我要写一个 Python 应用程序,我希望它能与用户进行聊天。不仅是文本聊天——用户还可以粘贴图片,并让应用程序自然地对图片进行讨论。 如果你对当前机器学习的前沿技术不太熟悉,这个需求可能会让你感到胆战心惊,但别害怕。只需在你的代码中右键点击,选择“插入 HF 模型”。然后会弹出一个对话框: ![dialog_box_screenshot](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pycharm-integration/dialog_box_screenshot.png) 能够处理图像和文本聊天的模型称为“image-text-to-text”类型:用户可以提供图像和文本,模型则输出文本。在左侧下拉菜单中找到它。默认情况下,模型列表按点赞数排序——不过要记住,老模型往往会积累很多点赞数,即使它们可能不再是最先进的了。 我们可以通过模型名称下方的更新时间查看模型的更新日期。让我们选择一个既新又受欢迎的模型:`microsoft/Phi-3.5-vision-instruct`。 对于某些模型类别,你可以点击“使用模型”按钮,让系统自动在你的笔记本中插入一些基础代码。不过,更好的方法通常是浏览右侧的模型卡片,复制其中的示例代码。对话框右侧显示的模型卡片和 Hugging Face Hub 上的完全一致。让我们复制示例代码并粘贴到我们的代码中! ![code_snippet_screenshot](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pycharm-integration/code_snippet_screenshot.png) 你公司的网络安全人员可能会对你直接复制一大段陌生代码并运行感到不满,但如果他们抱怨,你只需“无视”他们,然后继续运行代码。 看吧:我们现在有了一个能够愉快聊天的模型——在这个例子中,它能读取并评论一份微软的演示幻灯片截图。你可以随意试试看这个例子,尝试你的对话或自己的图片。一旦成功运行,只需将这段代码封装进一个类中,你的应用就可以用了。这样,我们在十分钟内获得了最先进的开源机器学习功能,连浏览器都没打开过。 > **提示** > 这些模型可能很大!如果遇到内存不足的错误,可以尝试使用更大内存的 GPU,或者减少示例代码中的 20。你也可以去掉 `device_map="cuda"`,把模型放到 CPU 内存中,虽然速度会变慢。 ## 即时模型卡 接下来,我们换个视角。假设你不是这段代码的作者,而是一个需要审查代码的同事。也许你是那个因为被无视了而且还在生气的网络安全人员。你看到这段代码,完全不知道自己在看什么。别慌——只需将鼠标悬停在模型名称上,整个模型卡片会立刻弹出来。你可以快速验证模型的来源及其预期用途。 (如果你是那种两周后就忘记自己写过什么代码的人,这个功能也非常有用) ![model_card_screenshot](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pycharm-integration/model_card_screenshot.png) ## 本地模型缓存 你可能会注意到,第一次运行代码时,模型需要下载,但之后加载速度就快多了。模型已经被存储在你的本地缓存中了。还记得之前那个神秘的小 🤗 图标吗?点击它,你就能看到缓存中的所有内容: ![model_cache_screenshot](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/pycharm-integration/model_cache_screenshot.png) 这个功能非常方便,可以查看你正在使用的模型,并在不再需要时清理以节省磁盘空间。对于两周后的“记忆缺失”情景,这也很有帮助——如果你不记得当时用的模型是什么,很可能就在这里。不过要记住,2024 年大多数有用的、适合生产的模型都超过 1GB,因此缓存很快会被填满! ## 人工智能时代的 Python 在 Hugging Face,我们认为开源人工智能是开源哲学的自然延伸:开源软件解决开发者和用户的问题,为他们提供可以集成到代码中的新能力,而开源模型也提供了同样的便利。人们往往容易被复杂性迷惑,过分关注实现细节,因为一切都如此新奇有趣,但模型的存在是为了 **为你解决问题**。如果抽象掉架构和训练的细节,模型本质上是 **函数** ——你代码中的工具,可以将某种输入转换成某种输出。 因此,这些功能是非常合适的补充。正如 IDE 已经能为你显示函数签名和文档字符串,它们现在也能为你展示示例代码和模型卡。像这样的集成可以让你像导入其他库一样方便地导入聊天或图像识别模型。我们相信这就是代码未来的发展方向,希望这些功能能对你有所帮助! **[下载 PyCharm](https://jb.gg/get-pycharm-hf) 并体验 Hugging Face 集成。** **使用代码 PyCharm4HF 获取免费 3 个月的 PyCharm 订阅 [点击这里](http://jetbrains.com/store/redeem/)。**
1
0
hf_public_repos/blog
hf_public_repos/blog/.vscode/settings.json
{ "deno.enable": true }
2
0
hf_public_repos
hf_public_repos/block_movement_pruning/setup.py
from setuptools import setup def readme(): with open('README.rst') as f: return f.read() setup(name='block_movement_pruning', version='0.1', description='block_movement_pruning is a python package for experimenting on block-sparse pruned version of popular networks.', long_description=readme(), classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.0', 'Topic :: Text Processing', ], keywords='', url='', author='', author_email='', license='MIT', packages=['block_movement_pruning'], entry_points={ 'console_scripts': ['block_movement_pruning_run=block_movement_pruning.command_line:train_command'], }, include_package_data=True, zip_safe=False)
3
0
hf_public_repos
hf_public_repos/block_movement_pruning/requirements.txt
torch>=1.4.0 -e git+https://github.com/huggingface/transformers.git@352d5472b0c1dec0f420d606d16747d851b4bda8#egg=transformers knockknock>=0.1.8.1 h5py>=2.10.0 numpy>=1.18.2 scipy>=1.4.1
4
0
hf_public_repos
hf_public_repos/block_movement_pruning/Saving_PruneBERT.ipynb
# Includes import h5py import os import json from collections import OrderedDict from scipy import sparse import numpy as np import torch from torch import nn from transformers import * os.chdir('../../')# Load fine-pruned model and quantize the model model = BertForQuestionAnswering.from_pretrained("huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad") model.to('cpu') quantized_model = torch.quantization.quantize_dynamic( model=model, qconfig_spec = { torch.nn.Linear : torch.quantization.default_dynamic_qconfig, }, dtype=torch.qint8, ) # print(quantized_model) qtz_st = quantized_model.state_dict()# Saving the original (encoder + classifier) in the standard torch.save format dense_st = {name: param for name, param in model.state_dict().items() if "embedding" not in name and "pooler" not in name} torch.save(dense_st, 'dbg/dense_squad.pt',) dense_mb_size = os.path.getsize("dbg/dense_squad.pt") # Elementary representation: we decompose the quantized tensors into (scale, zero_point, int_repr). # See https://pytorch.org/docs/stable/quantization.html # We further leverage the fact that int_repr is sparse matrix to optimize the storage: we decompose int_repr into # its CSR representation (data, indptr, indices). elementary_qtz_st = {} for name, param in qtz_st.items(): if "dtype" not in name and param.is_quantized: print("Decompose quantization for", name) # We need to extract the scale, the zero_point and the int_repr for the quantized tensor and modules scale = param.q_scale() # torch.tensor(1,) - float32 zero_point = param.q_zero_point() # torch.tensor(1,) - int32 elementary_qtz_st[f"{name}.scale"] = scale elementary_qtz_st[f"{name}.zero_point"] = zero_point # We assume the int_repr is sparse and compute its CSR representation # Only the FCs in the encoder are actually sparse int_repr = param.int_repr() # torch.tensor(nb_rows, nb_columns) - int8 int_repr_cs = sparse.csr_matrix(int_repr) # scipy.sparse.csr.csr_matrix elementary_qtz_st[f"{name}.int_repr.data"] = int_repr_cs.data # np.array int8 elementary_qtz_st[f"{name}.int_repr.indptr"] = int_repr_cs.indptr # np.array int32 assert max(int_repr_cs.indices) < 65535 # If not, we shall fall back to int32 elementary_qtz_st[f"{name}.int_repr.indices"] = np.uint16(int_repr_cs.indices) # np.array uint16 elementary_qtz_st[f"{name}.int_repr.shape"] = int_repr_cs.shape # tuple(int, int) else: elementary_qtz_st[name] = param # Create mapping from torch.dtype to string description (we could also used an int8 instead of string) str_2_dtype = {"qint8": torch.qint8} dtype_2_str = {torch.qint8: "qint8"} # Saving the pruned (encoder + classifier) in the standard torch.save format dense_optimized_st = {name: param for name, param in elementary_qtz_st.items() if "embedding" not in name and "pooler" not in name} torch.save(dense_optimized_st, 'dbg/dense_squad_optimized.pt',) print("Encoder Size (MB) - Sparse & Quantized - `torch.save`:", round(os.path.getsize("dbg/dense_squad_optimized.pt")/1e6, 2)) # Save the decomposed state_dict with an HDF5 file # Saving only the encoder + QA Head with h5py.File('dbg/squad_sparse.h5','w') as hf: for name, param in elementary_qtz_st.items(): if "embedding" in name: print(f"Skip {name}") continue if "pooler" in name: print(f"Skip {name}") continue if type(param) == torch.Tensor: if param.numel() == 1: # module scale # module zero_point hf.attrs[name] = param continue if param.requires_grad: # LayerNorm param = param.detach().numpy() hf.create_dataset(name, data=param, compression="gzip", compression_opts=9) elif 
type(param) == float or type(param) == int or type(param) == tuple: # float - tensor _packed_params.weight.scale # int - tensor _packed_params.weight.zero_point # tuple - tensor _packed_params.weight.shape hf.attrs[name] = param elif type(param) == torch.dtype: # dtype - tensor _packed_params.dtype hf.attrs[name] = dtype_2_str[param] else: hf.create_dataset(name, data=param, compression="gzip", compression_opts=9) with open('dbg/metadata.json', 'w') as f: f.write(json.dumps(qtz_st._metadata)) size = os.path.getsize("dbg/squad_sparse.h5") + os.path.getsize("dbg/metadata.json") print("") print("Encoder Size (MB) - Dense: ", round(dense_mb_size/1e6, 2)) print("Encoder Size (MB) - Sparse & Quantized:", round(size/1e6, 2)) # Save the decomposed state_dict to HDF5 storage # Save everything in the architecutre (embedding + encoder + QA Head) with h5py.File('dbg/squad_sparse_with_embs.h5','w') as hf: for name, param in elementary_qtz_st.items(): # if "embedding" in name: # print(f"Skip {name}") # continue # if "pooler" in name: # print(f"Skip {name}") # continue if type(param) == torch.Tensor: if param.numel() == 1: # module scale # module zero_point hf.attrs[name] = param continue if param.requires_grad: # LayerNorm param = param.detach().numpy() hf.create_dataset(name, data=param, compression="gzip", compression_opts=9) elif type(param) == float or type(param) == int or type(param) == tuple: # float - tensor _packed_params.weight.scale # int - tensor _packed_params.weight.zero_point # tuple - tensor _packed_params.weight.shape hf.attrs[name] = param elif type(param) == torch.dtype: # dtype - tensor _packed_params.dtype hf.attrs[name] = dtype_2_str[param] else: hf.create_dataset(name, data=param, compression="gzip", compression_opts=9) with open('dbg/metadata.json', 'w') as f: f.write(json.dumps(qtz_st._metadata)) size = os.path.getsize("dbg/squad_sparse_with_embs.h5") + os.path.getsize("dbg/metadata.json") print('\nSize (MB):', round(size/1e6, 2)) # Reconstruct the elementary state dict reconstructed_elementary_qtz_st = {} hf = h5py.File('dbg/squad_sparse_with_embs.h5','r') for attr_name, attr_param in hf.attrs.items(): if 'shape' in attr_name: attr_param = tuple(attr_param) elif ".scale" in attr_name: if "_packed_params" in attr_name: attr_param = float(attr_param) else: attr_param = torch.tensor(attr_param) elif ".zero_point" in attr_name: if "_packed_params" in attr_name: attr_param = int(attr_param) else: attr_param = torch.tensor(attr_param) elif ".dtype" in attr_name: attr_param = str_2_dtype[attr_param] reconstructed_elementary_qtz_st[attr_name] = attr_param # print(f"Unpack {attr_name}") # Get the tensors/arrays for data_name, data_param in hf.items(): if "LayerNorm" in data_name or "_packed_params.bias" in data_name: reconstructed_elementary_qtz_st[data_name] = torch.from_numpy(np.array(data_param)) elif "embedding" in data_name: reconstructed_elementary_qtz_st[data_name] = torch.from_numpy(np.array(data_param)) else: # _packed_params.weight.int_repr.data, _packed_params.weight.int_repr.indices and _packed_params.weight.int_repr.indptr data_param = np.array(data_param) if "indices" in data_name: data_param = np.array(data_param, dtype=np.int32) reconstructed_elementary_qtz_st[data_name] = data_param # print(f"Unpack {data_name}") hf.close()# Sanity checks for name, param in reconstructed_elementary_qtz_st.items(): assert name in elementary_qtz_st for name, param in elementary_qtz_st.items(): assert name in reconstructed_elementary_qtz_st, name for name, param in 
reconstructed_elementary_qtz_st.items(): assert type(param) == type(elementary_qtz_st[name]), name if type(param) == torch.Tensor: assert torch.all(torch.eq(param, elementary_qtz_st[name])), name elif type(param) == np.ndarray: assert (param == elementary_qtz_st[name]).all(), name else: assert param == elementary_qtz_st[name], name# Re-assemble the sparse int_repr from the CSR format reconstructed_qtz_st = {} for name, param in reconstructed_elementary_qtz_st.items(): if "weight.int_repr.indptr" in name: prefix_ = name[:-16] data = reconstructed_elementary_qtz_st[f"{prefix_}.int_repr.data"] indptr = reconstructed_elementary_qtz_st[f"{prefix_}.int_repr.indptr"] indices = reconstructed_elementary_qtz_st[f"{prefix_}.int_repr.indices"] shape = reconstructed_elementary_qtz_st[f"{prefix_}.int_repr.shape"] int_repr = sparse.csr_matrix(arg1=(data, indices, indptr), shape=shape) int_repr = torch.tensor(int_repr.todense()) scale = reconstructed_elementary_qtz_st[f"{prefix_}.scale"] zero_point = reconstructed_elementary_qtz_st[f"{prefix_}.zero_point"] weight = torch._make_per_tensor_quantized_tensor(int_repr, scale, zero_point) reconstructed_qtz_st[f"{prefix_}"] = weight elif "int_repr.data" in name or "int_repr.shape" in name or "int_repr.indices" in name or \ "weight.scale" in name or "weight.zero_point" in name: continue else: reconstructed_qtz_st[name] = param # Sanity checks for name, param in reconstructed_qtz_st.items(): assert name in qtz_st for name, param in qtz_st.items(): assert name in reconstructed_qtz_st, name for name, param in reconstructed_qtz_st.items(): assert type(param) == type(qtz_st[name]), name if type(param) == torch.Tensor: assert torch.all(torch.eq(param, qtz_st[name])), name elif type(param) == np.ndarray: assert (param == qtz_st[name]).all(), name else: assert param == qtz_st[name], name# Load the re-constructed state dict into a model dummy_model = BertForQuestionAnswering.from_pretrained('bert-base-uncased') dummy_model.to('cpu') reconstructed_qtz_model = torch.quantization.quantize_dynamic( model=dummy_model, qconfig_spec = None, dtype=torch.qint8, ) reconstructed_qtz_st = OrderedDict(reconstructed_qtz_st) with open('dbg/metadata.json', 'r') as read_file: metadata = json.loads(read_file.read()) reconstructed_qtz_st._metadata = metadata reconstructed_qtz_model.load_state_dict(reconstructed_qtz_st)# Sanity checks on the infernce N = 32 for _ in range(25): inputs = torch.randint(low=0, high=30000, size=(N, 128)) mask = torch.ones(size=(N, 128)) y_reconstructed = reconstructed_qtz_model(input_ids=inputs, attention_mask=mask)[0] y = quantized_model(input_ids=inputs, attention_mask=mask)[0] assert torch.all(torch.eq(y, y_reconstructed)) print("Sanity check passed")
5
0
hf_public_repos
hf_public_repos/block_movement_pruning/MANIFEST.in
include README.rst
6
0
hf_public_repos
hf_public_repos/block_movement_pruning/README.md
# Movement Pruning: Adaptive Sparsity by Fine-Tuning *Magnitude pruning is a widely used strategy for reducing model size in pure supervised learning; however, it is less effective in the transfer learning regime that has become standard for state-of-the-art natural language processing applications. We propose the use of *movement pruning*, a simple, deterministic first-order weight pruning method that is more adaptive to pretrained model fine-tuning. Experiments show that when pruning large pretrained language models, movement pruning shows significant improvements in high-sparsity regimes. When combined with distillation, the approach achieves minimal accuracy loss with down to only 3% of the model parameters:* | Fine-pruning+Distillation<br>(Teacher=BERT-base fine-tuned) | BERT base<br>fine-tuned | Remaining<br>Weights (%) | Magnitude Pruning | L0 Regularization | Movement Pruning | Soft Movement Pruning | | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | SQuAD - Dev<br>EM/F1 | 80.4/88.1 | 10%<br>3% | 70.2/80.1<br>45.5/59.6 | 72.4/81.9<br>64.3/75.8 | 75.6/84.3<br>67.5/78.0 | **76.6/84.9**<br>**72.7/82.3** | | MNLI - Dev<br>acc/MM acc | 84.5/84.9 | 10%<br>3% | 78.3/79.3<br>69.4/70.6 | 78.7/79.7<br>76.0/76.2 | 80.1/80.4<br>76.5/77.4 | **81.2/81.8**<br>**79.5/80.1** | | QQP - Dev<br>acc/F1 | 91.4/88.4 | 10%<br>3% | 79.8/65.0<br>72.4/57.8 | 88.1/82.8<br>87.0/81.9 | 89.7/86.2<br>86.1/81.5 | **90.2/86.8**<br>**89.1/85.5** | This page contains information on how to fine-prune pre-trained models such as `BERT` to obtain extremely sparse models with movement pruning. In contrast to magnitude pruning which selects weights that are far from 0, movement pruning retains weights that are moving away from 0. For more information, we invite you to check out [our paper](https://arxiv.org/abs/2005.07683). You can also have a look at this fun *Explain Like I'm Five* introductory [slide deck](https://www.slideshare.net/VictorSanh/movement-pruning-explain-like-im-five-234205241). <div align="center"> <img src="https://www.seekpng.com/png/detail/166-1669328_how-to-make-emmental-cheese-at-home-icooker.png" width="400"> </div> ## Extreme sparsity and efficient storage One promise of extreme pruning is to obtain extremely small models that can be easily sent (and stored) on edge devices. By setting weights to 0., we reduce the amount of information we need to store, and thus decreasing the memory size. We are able to obtain extremely sparse fine-pruned models with movement pruning: ~95% of the dense performance with ~5% of total remaining weights in the BERT encoder. In [this notebook](https://github.com/huggingface/transformers/blob/master/examples/movement-pruning/Saving_PruneBERT.ipynb), we showcase how we can leverage standard tools that exist out-of-the-box to efficiently store an extremely sparse question answering model (only 6% of total remaining weights in the encoder). We are able to reduce the memory size of the encoder **from the 340MB (the orignal dense BERT) to 11MB**, without any additional training of the model (every operation is performed *post fine-pruning*). It is sufficiently small to store it on a [91' floppy disk](https://en.wikipedia.org/wiki/Floptical) 📎! 
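To give a feel for where these savings come from, here is an illustrative sketch (not part of the original repository) of how a roughly 6%-dense, dynamically quantized int8 weight matrix shrinks when stored in CSR form; this is the same idea the notebook linked above applies to every pruned linear layer of the encoder:

```python
import numpy as np
import torch
from scipy import sparse

# Toy example: a 768x768 int8 weight matrix with ~6% non-zero entries, mimicking one
# fine-pruned and dynamically quantized linear layer of the encoder.
dense = torch.zeros(768, 768, dtype=torch.int8)
mask = torch.rand(768, 768) < 0.06
dense[mask] = torch.randint(-127, 128, (int(mask.sum()),), dtype=torch.int8)

# CSR keeps only the non-zero values plus their (uint16) column indices and row pointers.
csr = sparse.csr_matrix(dense.numpy())
dense_bytes = dense.numel() * dense.element_size()
csr_bytes = csr.data.nbytes + csr.indptr.nbytes + np.uint16(csr.indices).nbytes
print(f"dense int8: {dense_bytes / 1e3:.0f} kB -> CSR int8: {csr_bytes / 1e3:.0f} kB")
```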
While movement pruning does not directly optimize for memory footprint (but rather the number of non-null weights), we hypothetize that further memory compression ratios can be achieved with specific quantization aware trainings (see for instance [Q8BERT](https://arxiv.org/abs/1910.06188), [And the Bit Goes Down](https://arxiv.org/abs/1907.05686) or [Quant-Noise](https://arxiv.org/abs/2004.07320)). ## Fine-pruned models As examples, we release two English PruneBERT checkpoints (models fine-pruned from a pre-trained `BERT` checkpoint), one on SQuAD and the other on MNLI. - **`prunebert-base-uncased-6-finepruned-w-distil-squad`**<br/> Pre-trained `BERT-base-uncased` fine-pruned with soft movement pruning on SQuAD v1.1. We use an additional distillation signal from `BERT-base-uncased` finetuned on SQuAD. The encoder counts 6% of total non-null weights and reaches 83.8 F1 score. The model can be accessed with: `pruned_bert = BertForQuestionAnswering.from_pretrained("huggingface/prunebert-base-uncased-6-finepruned-w-distil-squad")` - **`prunebert-base-uncased-6-finepruned-w-distil-mnli`**<br/> Pre-trained `BERT-base-uncased` fine-pruned with soft movement pruning on MNLI. We use an additional distillation signal from `BERT-base-uncased` finetuned on MNLI. The encoder counts 6% of total non-null weights and reaches 80.7 (matched) accuracy. The model can be accessed with: `pruned_bert = BertForSequenceClassification.from_pretrained("huggingface/prunebert-base-uncased-6-finepruned-w-distil-mnli")` ## How to fine-prune? ### Setup The code relies on the 🤗 Transformers library. In addition to the dependencies listed in the [`examples`](https://github.com/huggingface/transformers/tree/master/examples) folder, you should install a few additional dependencies listed in the `requirements.txt` file: `pip install -r requirements.txt`. Note that we built our experiments on top of a stabilized version of the library (commit https://github.com/huggingface/transformers/commit/352d5472b0c1dec0f420d606d16747d851b4bda8): we do not guarantee that everything is still compatible with the latest version of the master branch. ### Fine-pruning with movement pruning Below, we detail how to reproduce the results reported in the paper. We use SQuAD as a running example. Commands (and scripts) can be easily adapted for other tasks. The following command fine-prunes a pre-trained `BERT-base` on SQuAD using movement pruning towards 15% of remaining weights (85% sparsity). Note that we freeze all the embeddings modules (from their pre-trained value) and only prune the Fully Connected layers in the encoder (12 layers of Transformer Block). ```bash SERIALIZATION_DIR=<OUTPUT_DIR> SQUAD_DATA=squad_data mkdir $SQUAD_DATA cd $SQUAD_DATA wget -q https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json wget -q https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json cd .. python examples/movement-pruning/masked_run_squad.py \ --output_dir $SERIALIZATION_DIR \ --data_dir $SQUAD_DATA \ --train_file train-v1.1.json \ --predict_file dev-v1.1.json \ --do_train --do_eval --do_lower_case \ --model_type masked_bert \ --model_name_or_path bert-base-uncased \ --per_gpu_train_batch_size 16 \ --warmup_steps 5400 \ --num_train_epochs 10 \ --learning_rate 3e-5 --mask_scores_learning_rate 1e-2 \ --initial_threshold 1 --final_threshold 0.15 \ --initial_warmup 1 --final_warmup 2 \ --pruning_method topK --mask_init constant --mask_scale 0. 
``` ### Fine-pruning with other methods We can also explore other fine-pruning methods by changing the `pruning_method` parameter: Soft movement pruning ```bash python examples/movement-pruning/masked_run_squad.py \ --output_dir $SERIALIZATION_DIR \ --data_dir $SQUAD_DATA \ --train_file train-v1.1.json \ --predict_file dev-v1.1.json \ --do_train --do_eval --do_lower_case \ --model_type masked_bert \ --model_name_or_path bert-base-uncased \ --per_gpu_train_batch_size 16 \ --warmup_steps 5400 \ --num_train_epochs 10 \ --learning_rate 3e-5 --mask_scores_learning_rate 1e-2 \ --initial_threshold 0 --final_threshold 0.1 \ --initial_warmup 1 --final_warmup 2 \ --pruning_method sigmoied_threshold --mask_init constant --mask_scale 0. \ --regularization l1 --final_lambda 400. ``` L0 regularization ```bash python examples/movement-pruning/masked_run_squad.py \ --output_dir $SERIALIZATION_DIR \ --data_dir $SQUAD_DATA \ --train_file train-v1.1.json \ --predict_file dev-v1.1.json \ --do_train --do_eval --do_lower_case \ --model_type masked_bert \ --model_name_or_path bert-base-uncased \ --per_gpu_train_batch_size 16 \ --warmup_steps 5400 \ --num_train_epochs 10 \ --learning_rate 3e-5 --mask_scores_learning_rate 1e-1 \ --initial_threshold 1. --final_threshold 1. \ --initial_warmup 1 --final_warmup 1 \ --pruning_method l0 --mask_init constant --mask_scale 2.197 \ --regularization l0 --final_lambda 125. ``` Iterative Magnitude Pruning ```bash python examples/movement-pruning/masked_run_squad.py \ --output_dir ./dbg \ --data_dir examples/distillation/data/squad_data \ --train_file train-v1.1.json \ --predict_file dev-v1.1.json \ --do_train --do_eval --do_lower_case \ --model_type masked_bert \ --model_name_or_path bert-base-uncased \ --per_gpu_train_batch_size 16 \ --warmup_steps 5400 \ --num_train_epochs 10 \ --learning_rate 3e-5 \ --initial_threshold 1 --final_threshold 0.15 \ --initial_warmup 1 --final_warmup 2 \ --pruning_method magnitude ``` ### After fine-pruning **Counting parameters** Regularization based pruning methods (soft movement pruning and L0 regularization) rely on the penalty to induce sparsity. The multiplicative coefficient controls the sparsity level. To obtain the effective sparsity level in the encoder, we simply count the number of activated (non-null) weights: ```bash python examples/movement-pruning/counts_parameters.py \ --pruning_method sigmoied_threshold \ --threshold 0.1 \ --serialization_dir $SERIALIZATION_DIR ``` **Pruning once for all** Once the model has been fine-pruned, the pruned weights can be set to 0. once for all (reducing the amount of information to store). In our running experiments, we can convert a `MaskedBertForQuestionAnswering` (a BERT model augmented to enable on-the-fly pruning capabilities) to a standard `BertForQuestionAnswering`: ```bash python examples/movement-pruning/bertarize.py \ --pruning_method sigmoied_threshold \ --threshold 0.1 \ --model_name_or_path $SERIALIZATION_DIR ``` ## Hyper-parameters For reproducibility purposes, we share the detailed results presented in the paper. These [tables](https://docs.google.com/spreadsheets/d/17JgRq_OFFTniUrz6BZWW_87DjFkKXpI1kYDSsseT_7g/edit?usp=sharing) exhaustively describe the individual hyper-parameters used for each data point. ## Inference speed Early experiments show that even though models fine-pruned with (soft) movement pruning are extremely sparse, they do not benefit from significant improvement in terms of inference speed when using the standard PyTorch inference. 
We are currently benchmarking and exploring inference setups specifically for sparse architectures. In particular, hardware manufacturers are announcing devices that will speed up inference for sparse networks considerably. ## Citation If you find this resource useful, please consider citing the following paper: ``` @article{sanh2020movement, title={Movement Pruning: Adaptive Sparsity by Fine-Tuning}, author={Victor Sanh and Thomas Wolf and Alexander M. Rush}, year={2020}, eprint={2005.07683}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
7
0
hf_public_repos/block_movement_pruning
hf_public_repos/block_movement_pruning/block_movement_pruning/masked_run_glue.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-pruning Masked BERT on sequence classification on GLUE.""" import argparse import glob import json import logging import os import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from emmental import MaskedBertConfig, MaskedBertForSequenceClassification from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForSequenceClassification, BertTokenizer, get_linear_schedule_with_warmup, ) from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForSequenceClassification, BertTokenizer), "masked_bert": (MaskedBertConfig, MaskedBertForSequenceClassification, BertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def schedule_threshold( step: int, total_step: int, warmup_steps: int, initial_threshold: float, final_threshold: float, initial_warmup: int, final_warmup: int, final_lambda: float, ): if step <= initial_warmup * warmup_steps: threshold = initial_threshold elif step > (total_step - final_warmup * warmup_steps): threshold = final_threshold else: spars_warmup_steps = initial_warmup * warmup_steps spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3) regu_lambda = final_lambda * threshold / final_threshold return threshold, regu_lambda def regularization(model: nn.Module, mode: str): regu, counter = 0, 0 for name, param in model.named_parameters(): if "mask_scores" in name: if mode == "l1": regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() elif mode == "l0": regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() else: ValueError("Don't know this mode.") counter += 1 return regu / counter def train(args, train_dataset, model, tokenizer, teacher=None): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter(log_dir=args.output_dir) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) 
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad], "lr": args.mask_scores_learning_rate, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) # Distillation if teacher is not None: logger.info(" Training with distillation") global_step = 0 # Global TopK if args.global_topk: threshold_mem = None epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) except ValueError: global_step = 0 epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) # Added here for reproducibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) threshold, regu_lambda = schedule_threshold( step=global_step, total_step=t_total, warmup_steps=args.warmup_steps, final_threshold=args.final_threshold, initial_threshold=args.initial_threshold, final_warmup=args.final_warmup, initial_warmup=args.initial_warmup, final_lambda=args.final_lambda, ) # Global TopK if args.global_topk: if threshold == 1.0: threshold = -1e2 # Or an indefinitely low quantity else: if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): # Sort all the values to get the global topK concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() threshold = threshold_mem else: threshold = threshold_mem inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids if "masked" in args.model_type: inputs["threshold"] = threshold outputs = model(**inputs) loss, logits_stu = outputs # model outputs are always tuple in transformers (see doc) # Distillation loss if teacher is not None: if "token_type_ids" not in inputs: inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] with torch.no_grad(): (logits_tea,) = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) loss_logits = ( F.kl_div( 
input=F.log_softmax(logits_stu / args.temperature, dim=-1), target=F.softmax(logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature ** 2) ) loss = args.alpha_distil * loss_logits + args.alpha_ce * loss # Regularization if args.regularization is not None: regu_ = regularization(model=model, mode=args.regularization) loss = loss + regu_lambda * regu_ if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps len(epoch_iterator) <= args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator) ): if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: tb_writer.add_scalar("threshold", threshold, global_step) for name, param in model.named_parameters(): if not param.requires_grad: continue tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) if args.regularization is not None and "mask_scores" in name: if args.regularization == "l1": perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() elif args.regularization == "l0": perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr() logs["learning_rate"] = learning_rate_scalar[0] if len(learning_rate_scalar) > 1: for idx, lr in enumerate(learning_rate_scalar[1:]): logs[f"learning_rate/{idx+1}"] = lr logs["loss"] = loss_scalar if teacher is not None: logs["loss/distil"] = loss_logits.item() if args.regularization is not None: logs["loss/regularization"] = regu_.item() if (teacher is not None) or (args.regularization is not None): if (teacher is not None) and (args.regularization is not None): logs["loss/instant_ce"] = ( loss.item() - regu_lambda * logs["loss/regularization"] - args.alpha_distil * logs["loss/distil"] ) / args.alpha_ce elif teacher is not None: logs["loss/instant_ce"] = ( loss.item() - args.alpha_distil * logs["loss/distil"] ) / args.alpha_ce else: logs["loss/instant_ce"] = loss.item() - regu_lambda * 
logs["loss/regularization"] logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + "/MM") if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None # Global TopK if args.global_topk: threshold_mem = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "masked_bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids if "masked" in args.model_type: inputs["threshold"] = args.final_threshold if args.global_topk: if threshold_mem is None: concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * args.final_threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() inputs["threshold"] = threshold_mem outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": from scipy.special import softmax probs = softmax(preds, axis=-1) entropy = np.exp((-probs * np.log(probs)).sum(axis=-1).mean()) preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) if entropy is not None: result["eval_avg_entropy"] = entropy output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = ( processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, 
tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=output_mode, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.", ) parser.add_argument( "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", ) parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") # Pruning parameters parser.add_argument( "--mask_scores_learning_rate", default=1e-2, type=float, help="The Adam initial learning rate of the mask scores.", ) parser.add_argument( "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." ) parser.add_argument( "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." ) parser.add_argument( "--initial_warmup", default=1, type=int, help="Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays" "at its `initial_threshold` value (sparsity schedule).", ) parser.add_argument( "--final_warmup", default=2, type=int, help="Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays" "at its final_threshold value (sparsity schedule).", ) parser.add_argument( "--pruning_method", default="topK", type=str, help="Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning, sigmoied_threshold = Soft movement pruning).", ) parser.add_argument( "--mask_init", default="constant", type=str, help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", ) parser.add_argument( "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." ) parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") parser.add_argument( "--final_lambda", default=0.0, type=float, help="Regularization intensity (used in conjunction with `regularization`.", ) parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") parser.add_argument( "--global_topk_frequency_compute", default=25, type=int, help="Frequency at which we compute the TopK global threshold.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help="Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for distillation.", ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." 
) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() # Regularization if args.regularization == "null": args.regularization = None if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( f"Output directory ({args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." 
) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, pruning_method=args.pruning_method, mask_init=args.mask_init, mask_scale=args.mask_scale, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, do_lower_case=args.do_lower_case, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_distil > 0.0 assert args.alpha_distil + args.alpha_ce > 0.0 teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, from_tf=False, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None, ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and 
tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
8
0
hf_public_repos/block_movement_pruning
hf_public_repos/block_movement_pruning/block_movement_pruning/run.sh
python masked_run_squad.py --output_dir block_movement_pruning/output --overwrite_output_dir \ --data_dir squad_data \ --train_file train-v1.1.json --predict_file dev-v1.1.json \ --do_train --do_eval \ --save_steps 5000 \ --logging_steps 200 \ --eval_all_checkpoints \ --do_lower_case \ --model_type masked_bert \ --model_name_or_path bert-base-uncased \ --per_gpu_train_batch_size 16 --gradient_accumulation_steps 1 \ --per_gpu_eval_batch_size 16 \ --num_train_epochs 10 \ --learning_rate 3e-5 \ --initial_threshold 1 \ --final_threshold 0.1 \ --warmup_steps 5400 \ --initial_warmup 1 \ --final_warmup 2 \ --pruning_method topK \ --ampere_pruning_method disabled \ --mask_scores_learning_rate 1e-2 \ --mask_init constant \ --mask_scale 0. \ --mask_block_rows 1 \ --mask_block_cols 1 \ --threads 8 \ --shuffling_method mask_annealing \ --initial_shuffling_temperature 0.1 \ --final_shuffling_temperature 20 \ --shuffling_learning_rate 1e-2 \ --in_shuffling_group 2 \ --out_shuffling_group 2 #--truncate_train_examples 100 # --initial_ampere_temperature 0.1 \ # --final_ampere_temperature 10 \
9
0
hf_public_repos
hf_public_repos/blog/aivsai.md
--- title: "Introducing ⚔️ AI vs. AI ⚔️ a deep reinforcement learning multi-agents competition system" thumbnail: /blog/assets/128_aivsai/thumbnail.png authors: - user: CarlCochet - user: ThomasSimonini --- # Introducing ⚔️ AI vs. AI ⚔️ a deep reinforcement learning multi-agents competition system <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_aivsai/thumbnail.png" alt="Thumbnail"> </div> We’re excited to introduce a new tool we created: **⚔️ AI vs. AI ⚔️, a deep reinforcement learning multi-agents competition system**. This tool, hosted on [Spaces](https://hf.co/spaces), allows us **to create multi-agent competitions**. It is composed of three elements: - A *Space* with a matchmaking algorithm that **runs the model fights using a background task**. - A *Dataset* **containing the results**. - A *Leaderboard* that gets the **match history results and displays the models’ ELO**. Then, when a user pushes a trained model to the Hub, **it gets evaluated and ranked against others**. Thanks to that, we can evaluate your agents against other’s agents in a multi-agent setting. In addition to being a useful tool for hosting multi-agent competitions, we think this tool can also be a **robust evaluation technique in multi-agent settings.** By playing against a lot of policies, your agents are evaluated against a wide range of behaviors. This should give you a good idea of the quality of your policy. Let’s see how it works with our first competition host: SoccerTwos Challenge. <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_aivsai/soccertwos.gif" alt="SoccerTwos example"> </div> ## How does AI vs. AI works? AI vs. AI is an open-source tool developed at Hugging Face **to rank the strength of reinforcement learning models in a multi-agent setting**. The idea is to get a **relative measure of skill rather than an objective one** by making the models play against each other continuously and use the matches results to assess their performance compared to all the other models and consequently get a view of the quality of their policy without requiring classic metrics. The more agents are submitted for a given task or environment, **the more representative the rating becomes**. To generate a rating based on match results in a competitive environment, we decided to base the rankings on the [ELO rating system](https://en.wikipedia.org/wiki/Elo_rating_system). The core concept is that after a match ends, the rating of both players are updated based on the result and the ratings they had before the game. When a user with a high rating beats one with a low ranking, they won't get many points. Likewise, the loser would not lose many points in this case. Conversely, if a low-rated player wins in an upset against a high-rated player, it will cause a more significant effect on both of their ratings. In our context, we **kept the system as simple as possible by not adding any alteration to the quantities gained or lost based on the starting ratings of the player**. As such, gain and loss will always be the perfect opposite (+10 / -10, for instance), and the average ELO rating will stay constant at the starting rating. The choice of a 1200 ELO rating start is entirely arbitrary. 
If you want to learn more about ELO and see some calculation examples, we wrote an explanation in our Deep Reinforcement Learning Course [here](https://huggingface.co/deep-rl-course/unit7/self-play?fw=pt#the-elo-score-to-evaluate-our-agent). Using this rating, it is possible **to generate matches between models with comparable strengths automatically**. There are several ways you can go about creating a matchmaking system, but here we decided to keep it fairly simple while guaranteeing a minimum amount of diversity in the matchups and also keeping most matches with fairly close opposing ratings. <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_aivsai/aivsai.png" alt="AI vs AI Process"> </div> Here's how the algorithm works: 1. Gather all the available models on the Hub. New models get a starting rating of 1200, while others keep the rating they have gained/lost through their previous matches. 2. Create a queue from all these models. 3. Pop the first element (model) from the queue, and then pop another random model in this queue from the n models with the closest ratings to the first model. 4. Simulate this match by loading both models in the environment (a Unity executable, for instance) and gathering the results. For this implementation, we sent the results to a Hugging Face Dataset on the Hub. 5. Compute the new rating of both models based on the received result and the ELO formula. 6. Continue popping models two by two and simulating the matches until only one or zero models are in the queue. 7. Save the resulting ratings and go back to step 1. To run this matchmaking process continuously, we use **free Hugging Face Spaces hardware with a Scheduler** to keep running the matchmaking process as a background task. The Space is also used to fetch the ELO ratings of each model that has already played and, from them, display [a leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos) **from which everyone can check the progress of the models**. <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_aivsai/leaderboard.png" alt="Leaderboard"> </div> The process generally uses several Hugging Face Datasets to provide data persistence (here, match history and model ratings). Since the process also saves the matches' history, it is possible to see precisely the results of any given model. This can, for instance, allow you to check why your model struggles with another one, most notably using another demo Space to visualize matches like [this one](https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos). For now, **this experiment is running with the ML-Agents environment SoccerTwos for the Hugging Face Deep RL Course**, however, the process and implementation, in general, are very much **environment agnostic and could be used to evaluate for free a wide range of adversarial multi-agent settings**. Of course, it is important to note again that this evaluation is a relative rating between the strengths of the submitted agents, and the ratings by themselves **have no objective meaning contrary to other metrics**. They only represent how well or poorly a model performs compared to the other models in the pool. Still, given a large and varied enough pool of models (and enough matches played), this evaluation becomes a very solid way to represent the general performance of a model. ## Our first AI vs.
AI challenge experimentation: SoccerTwos Challenge ⚽ This challenge is Unit 7 of our [free Deep Reinforcement Learning Course](https://huggingface.co/deep-rl-course/unit0/introduction). It started on February 1st and will end on April 30th. If you’re interested, **you don’t need to participate in the course to be able to participate in the competition. You can start here** 👉 https://huggingface.co/deep-rl-course/unit7/introduction In this Unit, readers learned the basics of multi-agent reinforcement learning (MARL) by training a **2vs2 soccer team.** ⚽ The environment used was made by the [Unity ML-Agents team](https://github.com/Unity-Technologies/ml-agents). The goal is simple: your team needs to score a goal. To do that, they need to beat the opponent's team and collaborate with their teammate. <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_aivsai/soccertwos.gif" alt="SoccerTwos example"> </div> In addition to the leaderboard, we created a Space demo where people can choose two teams and visualize them playing 👉 [https://huggingface.co/spaces/unity/SoccerTwos](https://huggingface.co/spaces/unity/SoccerTwos) This experimentation is going well since we already have 48 models on the [leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos). ![Leaderboard](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_aivsai/leaderboard.png) We also [created a discord channel called ai-vs-ai-competition](http://hf.co/discord/join) so that people can exchange with others and share advice. ### Conclusion and what’s next? Since the tool we developed **is environment agnostic**, we want to host more challenges in the future with [PettingZoo](https://pettingzoo.farama.org/) and other multi-agent environments. If you have environments or challenges you would like us to host, <a href="mailto:[email protected]">don’t hesitate to reach out to us</a>. In the future, we will host multiple multi-agent competitions with this tool and environments we created, such as SnowballFight. <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/128_aivsai/snowballfight.gif" alt="Snowballfight gif"> </div> In addition to being a useful tool for hosting multi-agent competitions, we think that this tool can also be **a robust evaluation technique in multi-agent settings: by playing against a lot of policies, your agents are evaluated against a wide range of behaviors, and you’ll get a good idea of the quality of your policy.** The best way to keep in touch is to [join our discord server](http://hf.co/discord/join) to exchange with us and with the community. **Citation** If you found this useful for your academic work, please consider citing our work, in text: `Cochet, Simonini, "Introducing AI vs. AI a deep reinforcement learning multi-agents competition system", Hugging Face Blog, 2023.` BibTeX citation: ``` @article{cochet-simonini2023, author = {Cochet, Carl and Simonini, Thomas}, title = {Introducing AI vs. AI a deep reinforcement learning multi-agents competition system}, journal = {Hugging Face Blog}, year = {2023}, note = {https://huggingface.co/blog/aivsai}, } ```
0
0
hf_public_repos
hf_public_repos/blog/train-sentence-transformers.md
--- title: "Training and Finetuning Embedding Models with Sentence Transformers v3" thumbnail: /blog/assets/train-sentence-transformers/st-hf-thumbnail.png authors: - user: tomaarsen --- # Training and Finetuning Embedding Models with Sentence Transformers v3 [Sentence Transformers](https://sbert.net/) is a Python library for using and training embedding models for a wide range of applications, such as retrieval augmented generation, semantic search, semantic textual similarity, paraphrase mining, and more. Its v3.0 update is the largest since the project's inception, introducing a new training approach. In this blogpost, I'll show you how to use it to finetune Sentence Transformer models to improve their performance on specific tasks. You can also use this method to train new Sentence Transformer models from scratch. Finetuning Sentence Transformers now involves several components, including datasets, loss functions, training arguments, evaluators, and the new trainer itself. I'll go through each of these components in detail and provide examples of how to use them to train effective models. ## Table of Contents * [Why Finetune?](#why-finetune) * [Training Components](#training-components) * [Dataset](#dataset) + [Data on Hugging Face Hub](#data-on-hugging-face-hub) + [Local Data (CSV, JSON, Parquet, Arrow, SQL)](#local-data-csv-json-parquet-arrow-sql) + [Local Data that requires pre-processing](#local-data-that-requires-pre-processing) + [Dataset Format](#dataset-format) * [Loss Function](#loss-function) * [Training Arguments](#training-arguments) * [Evaluator](#evaluator) + [EmbeddingSimilarityEvaluator with STSb](#embeddingsimilarityevaluator-with-stsb) + [TripletEvaluator with AllNLI](#tripletevaluator-with-allnli) * [Trainer](#trainer) + [Callbacks](#callbacks) * [Multi-Dataset Training](#multi-dataset-training) * [Deprecation](#deprecation) * [Additional Resources](#additional-resources) + [Training Examples](#training-examples) + [Documentation](#documentation) ## Why Finetune? Finetuning Sentence Transformer models can significantly enhance their performance on specific tasks. This is because each task requires a unique notion of similarity. Let's consider a couple of news article headlines as an example: - "Apple launches the new iPad" - "NVIDIA is gearing up for the next GPU generation" Depending on the use case, we might want similar or dissimilar embeddings for these texts. For instance, a classification model for news articles could treat these texts as similar since they both belong to the Technology category. On the other hand, a semantic textual similarity or retrieval model should consider them dissimilar due to their distinct meanings. ## Training Components Training Sentence Transformer models involves the following components: 1. [**Dataset**](#dataset): The data used for training and evaluation. 2. [**Loss Function**](#loss-function): A function that quantifies the model's performance and guides the optimization process. 3. [**Training Arguments**](#training-arguments) (optional): Parameters that influence training performance and tracking/debugging. 4. [**Evaluator**](#evaluator) (optional): A tool for evaluating the model before, during, or after training. 5. [**Trainer**](#trainer): Brings together the model, dataset, loss function, and other components for training. Now, let's dive into each of these components in more detail. 
## Dataset The [`SentenceTransformerTrainer`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer) uses [`datasets.Dataset`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset) or [`datasets.DatasetDict`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict) instances for training and evaluation. You can load data from the Hugging Face Datasets Hub or use local data in various formats such as CSV, JSON, Parquet, Arrow, or SQL. Note: Many Hugging Face datasets that work out of the box with Sentence Transformers have been tagged with `sentence-transformers`, allowing you to easily find them by browsing to [https://huggingface.co/datasets?other=sentence-transformers](https://huggingface.co/datasets?other=sentence-transformers). We strongly recommend that you browse these datasets to find training datasets that might be useful for your tasks. ### Data on Hugging Face Hub To load data from datasets in the Hugging Face Hub, use the [`load_dataset`](https://huggingface.co/docs/datasets/main/en/package_reference/loading_methods#datasets.load_dataset) function: ```python from datasets import load_dataset train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train") eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev") print(train_dataset) """ Dataset({ features: ['premise', 'hypothesis', 'label'], num_rows: 942069 }) """ ``` Some datasets, like [`sentence-transformers/all-nli`](https://huggingface.co/datasets/sentence-transformers/all-nli), have multiple subsets with different data formats. You need to specify the subset name along with the dataset name. ### Local Data (CSV, JSON, Parquet, Arrow, SQL) If you have local data in common file formats, you can easily load it using [`load_dataset`](https://huggingface.co/docs/datasets/main/en/package_reference/loading_methods#datasets.load_dataset) too: ```python from datasets import load_dataset dataset = load_dataset("csv", data_files="my_file.csv") # or dataset = load_dataset("json", data_files="my_file.json") ``` ### Local Data that requires pre-processing If your local data requires pre-processing, you can use [`datasets.Dataset.from_dict`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.from_dict) to initialize your dataset with a dictionary of lists: ```python from datasets import Dataset anchors = [] positives = [] # Open a file, perform preprocessing, filtering, cleaning, etc. # and append to the lists dataset = Dataset.from_dict({ "anchor": anchors, "positive": positives, }) ``` Each key in the dictionary becomes a column in the resulting dataset. ### Dataset Format It's crucial to ensure that your dataset format matches your chosen [loss function](#loss-function). This involves checking two things: 1. If your loss function requires a *Label* (as indicated in the [Loss Overview](https://sbert.net/docs/sentence_transformer/loss_overview.html) table), your dataset must have a column named **"label"** or **"score"**. 2. All columns other than **"label"** or **"score"** are considered *Inputs* (as indicated in the [Loss Overview](https://sbert.net/docs/sentence_transformer/loss_overview.html) table). The number of these columns must match the number of valid inputs for your chosen loss function. The names of the columns don't matter, **only their order matters**. 
For example, if your loss function accepts `(anchor, positive, negative) triplets`, then your first, second, and third dataset columns correspond with `anchor`, `positive`, and `negative`, respectively. This means that your first and second column must contain texts that should embed closely, and that your first and third column must contain texts that should embed far apart. That is why depending on your loss function, your dataset column order matters. Consider a dataset with columns `["text1", "text2", "label"]`, where the `"label"` column contains floating point similarity scores. This dataset can be used with `CoSENTLoss`, `AnglELoss`, and `CosineSimilarityLoss` because: 1. The dataset has a "label" column, which is required by these loss functions. 2. The dataset has 2 non-label columns, matching the number of inputs required by these loss functions. If the columns in your dataset are not ordered correctly, use [`Dataset.select_columns`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.select_columns) to reorder them. Additionally, remove any extraneous columns (e.g., `sample_id`, `metadata`, `source`, `type`) using [`Dataset.remove_columns`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.remove_columns), as they will be treated as inputs otherwise. ## Loss Function Loss functions measure how well a model performs on a given batch of data and guide the optimization process. The choice of loss function depends on your available data and target task. Refer to the [Loss Overview](https://sbert.net/docs/sentence_transformer/loss_overview.html) for a comprehensive list of options. Most loss functions can be initialized with just the `SentenceTransformer` `model` that you're training: ```python from datasets import load_dataset from sentence_transformers import SentenceTransformer from sentence_transformers.losses import CoSENTLoss # Load a model to train/finetune model = SentenceTransformer("FacebookAI/xlm-roberta-base") # Initialize the CoSENTLoss # This loss requires pairs of text and a floating point similarity score as a label loss = CoSENTLoss(model) # Load an example training dataset that works with our loss function: train_dataset = load_dataset("sentence-transformers/all-nli", "pair-score", split="train") """ Dataset({ features: ['sentence1', 'sentence2', 'label'], num_rows: 942069 }) """ ``` ## Training Arguments The [`SentenceTransformersTrainingArguments`](https://sbert.net/docs/package_reference/sentence_transformer/training_args.html#sentencetransformertrainingarguments) class allows you to specify parameters that influence training performance and tracking/debugging. While optional, experimenting with these arguments can help improve training efficiency and provide insights into the training process. In the Sentence Transformers documentation, I've outlined some of the most useful training arguments. I would recommend reading it in [Training Overview > Training Arguments](https://sbert.net/docs/sentence_transformer/training_overview.html#training-arguments). 
Here's an example of how to initialize [`SentenceTransformersTrainingArguments`](https://sbert.net/docs/package_reference/sentence_transformer/training_args.html#sentencetransformertrainingarguments): ```python from sentence_transformers.training_args import BatchSamplers, SentenceTransformerTrainingArguments args = SentenceTransformerTrainingArguments( # Required parameter: output_dir="models/mpnet-base-all-nli-triplet", # Optional training parameters: num_train_epochs=1, per_device_train_batch_size=16, per_device_eval_batch_size=16, warmup_ratio=0.1, fp16=True, # Set to False if your GPU can't handle FP16 bf16=False, # Set to True if your GPU supports BF16 batch_sampler=BatchSamplers.NO_DUPLICATES, # Losses using "in-batch negatives" benefit from no duplicates # Optional tracking/debugging parameters: eval_strategy="steps", eval_steps=100, save_strategy="steps", save_steps=100, save_total_limit=2, logging_steps=100, run_name="mpnet-base-all-nli-triplet", # Used in W&B if `wandb` is installed ) ``` Note that `eval_strategy` was introduced in `transformers` version `4.41.0`. Prior versions should use `evaluation_strategy` instead. ## Evaluator You can provide the [`SentenceTransformerTrainer`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer) with an `eval_dataset` to get the evaluation loss during training, but it may be useful to get more concrete metrics during training, too. For this, you can use evaluators to assess the model's performance with useful metrics before, during, or after training. You can use both an `eval_dataset` and an evaluator, one or the other, or neither. They evaluate based on the `eval_strategy` and `eval_steps` [Training Arguments](#training-arguments). Here are the implemented Evaluators that come with Sentence Transformers: | Evaluator | Required Data | | --- | --- | | [`BinaryClassificationEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#binaryclassificationevaluator) | Pairs with class labels | | [`EmbeddingSimilarityEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#embeddingsimilarityevaluator) | Pairs with similarity scores | | [`InformationRetrievalEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#informationretrievalevaluator) | Queries (qid => question), Corpus (cid => document), and relevant documents (qid => set[cid]) | | [`MSEEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#mseevaluator) | Source sentences to embed with a teacher model and target sentences to embed with the student model. Can be the same texts. | | [`ParaphraseMiningEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#paraphraseminingevaluator) | Mapping of IDs to sentences & pairs with IDs of duplicate sentences. | | [`RerankingEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#rerankingevaluator) | List of {'query': '..', 'positive': [...], 'negative': [...]} dictionaries. | | [`TranslationEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#translationevaluator) | Pairs of sentences in two separate languages. | | [`TripletEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#tripletevaluator) | (anchor, positive, negative) pairs.
| Additionally, you can use [`SequentialEvaluator`](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sequentialevaluator) to combine multiple evaluators into one, which can then be passed to the [`SentenceTransformerTrainer`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer). If you don't have the necessary evaluation data but still want to track the model's performance on common benchmarks, you can use these evaluators with data from Hugging Face: ### EmbeddingSimilarityEvaluator with STSb The STS Benchmark (a.k.a. STSb) is a commonly used benchmarking dataset to measure the model's understanding of semantic textual similarity of short texts like "A man is feeding a mouse to a snake.". Feel free to browse the [sentence-transformers/stsb](https://huggingface.co/datasets/sentence-transformers/stsb) dataset on Hugging Face. ```python from datasets import load_dataset from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction # Load the STSB dataset eval_dataset = load_dataset("sentence-transformers/stsb", split="validation") # Initialize the evaluator dev_evaluator = EmbeddingSimilarityEvaluator( sentences1=eval_dataset["sentence1"], sentences2=eval_dataset["sentence2"], scores=eval_dataset["score"], main_similarity=SimilarityFunction.COSINE, name="sts-dev", ) # Run evaluation manually: # print(dev_evaluator(model)) # Later, you can provide this evaluator to the trainer to get results during training ``` ### TripletEvaluator with AllNLI AllNLI is a concatenation of the [SNLI](https://huggingface.co/datasets/stanfordnlp/snli) and [MultiNLI](https://huggingface.co/datasets/nyu-mll/multi_nli) datasets, both of which are datasets for Natural Language Inference. This task is traditionally for determining whether two texts are an entailment, contradiction, or neither. It has since been adopted for training embedding models, as the entailing and contradictory sentences make for useful `(anchor, positive, negative)` triplets: a common format for training embedding models. In this snippet, it is used to evaluate how frequently the model considers the anchor text and the entailing text to be more similar than the anchor text and the contradictory text. An example text is "An older man is drinking orange juice at a restaurant.". Feel free to browse the [sentence-transformers/all-nli](https://huggingface.co/datasets/sentence-transformers/all-nli) dataset on Hugging Face. 
```python from datasets import load_dataset from sentence_transformers.evaluation import TripletEvaluator, SimilarityFunction # Load triplets from the AllNLI dataset max_samples = 1000 eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split=f"dev[:{max_samples}]") # Initialize the evaluator dev_evaluator = TripletEvaluator( anchors=eval_dataset["anchor"], positives=eval_dataset["positive"], negatives=eval_dataset["negative"], main_distance_function=SimilarityFunction.COSINE, name=f"all-nli-{max_samples}-dev", ) # Run evaluation manually: # print(dev_evaluator(model)) # Later, you can provide this evaluator to the trainer to get results during training ``` ## Trainer The [`SentenceTransformerTrainer`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer) brings together the model, dataset, loss function, and other components for training: ```python from datasets import load_dataset from sentence_transformers import ( SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments, SentenceTransformerModelCardData, ) from sentence_transformers.losses import MultipleNegativesRankingLoss from sentence_transformers.training_args import BatchSamplers from sentence_transformers.evaluation import TripletEvaluator # 1. Load a model to finetune with 2. (Optional) model card data model = SentenceTransformer( "microsoft/mpnet-base", model_card_data=SentenceTransformerModelCardData( language="en", license="apache-2.0", model_name="MPNet base trained on AllNLI triplets", ) ) # 3. Load a dataset to finetune on dataset = load_dataset("sentence-transformers/all-nli", "triplet") train_dataset = dataset["train"].select(range(100_000)) eval_dataset = dataset["dev"] test_dataset = dataset["test"] # 4. Define a loss function loss = MultipleNegativesRankingLoss(model) # 5. (Optional) Specify training arguments args = SentenceTransformerTrainingArguments( # Required parameter: output_dir="models/mpnet-base-all-nli-triplet", # Optional training parameters: num_train_epochs=1, per_device_train_batch_size=16, per_device_eval_batch_size=16, warmup_ratio=0.1, fp16=True, # Set to False if GPU can't handle FP16 bf16=False, # Set to True if GPU supports BF16 batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicates # Optional tracking/debugging parameters: eval_strategy="steps", eval_steps=100, save_strategy="steps", save_steps=100, save_total_limit=2, logging_steps=100, run_name="mpnet-base-all-nli-triplet", # Used in W&B if `wandb` is installed ) # 6. (Optional) Create an evaluator & evaluate the base model dev_evaluator = TripletEvaluator( anchors=eval_dataset["anchor"], positives=eval_dataset["positive"], negatives=eval_dataset["negative"], name="all-nli-dev", ) dev_evaluator(model) # 7. Create a trainer & train trainer = SentenceTransformerTrainer( model=model, args=args, train_dataset=train_dataset, eval_dataset=eval_dataset, loss=loss, evaluator=dev_evaluator, ) trainer.train() # (Optional) Evaluate the trained model on the test set, after training completes test_evaluator = TripletEvaluator( anchors=test_dataset["anchor"], positives=test_dataset["positive"], negatives=test_dataset["negative"], name="all-nli-test", ) test_evaluator(model) # 8. Save the trained model model.save_pretrained("models/mpnet-base-all-nli-triplet/final") # 9. 
(Optional) Push it to the Hugging Face Hub model.push_to_hub("mpnet-base-all-nli-triplet") ``` In this example I'm finetuning from [`microsoft/mpnet-base`](https://huggingface.co/microsoft/mpnet-base), a base model that is not yet a Sentence Transformer model. This requires more training data than finetuning an existing Sentence Transformer model, like [`all-mpnet-base-v2`](https://huggingface.co/sentence-transformers/all-mpnet-base-v2). After running this script, the [tomaarsen/mpnet-base-all-nli-triplet](https://huggingface.co/tomaarsen/mpnet-base-all-nli-triplet) model was uploaded for me. The triplet accuracy using cosine similarity, i.e. what percentage of the time `cosine_similarity(anchor, positive) > cosine_similarity(anchor, negative)` is 90.04% for the development set and 91.5% for the testing set! For reference, the [`microsoft/mpnet-base`](https://huggingface.co/microsoft/mpnet-base) model scored only 68.32% on the dev set before training. All of this information is stored in the automatically generated model card, including the base model, language, license, evaluation results, training & evaluation dataset info, hyperparameters, training logs, and more. Without any effort, your uploaded models should contain all the information that your potential users would need to determine whether your model is suitable for them. ### Callbacks The Sentence Transformers trainer supports various [`transformers.TrainerCallback`](https://huggingface.co/docs/transformers/main_classes/callback#transformers.TrainerCallback) subclasses, including: - [`WandbCallback`](https://huggingface.co/docs/transformers/en/main_classes/callback#transformers.integrations.WandbCallback) for logging training metrics to W&B if `wandb` is installed - [`TensorBoardCallback`](https://huggingface.co/docs/transformers/en/main_classes/callback#transformers.integrations.TensorBoardCallback) for logging training metrics to TensorBoard if `tensorboard` is accessible - [`CodeCarbonCallback`](https://huggingface.co/docs/transformers/en/main_classes/callback#transformers.integrations.CodeCarbonCallback) for tracking carbon emissions during training if `codecarbon` is installed These are automatically used without you having to specify anything, as long as the required dependency is installed. Refer to the [Transformers Callbacks documentation](https://huggingface.co/docs/transformers/en/main_classes/callback) for more information on these callbacks and how to create your own. ## Multi-Dataset Training Top-performing models are often trained using multiple datasets simultaneously. The [`SentenceTransformerTrainer`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer) simplifies this process by allowing you to train with multiple datasets without converting them to the same format. You can even apply different loss functions to each dataset. Here are the steps for multi-dataset training: 1. Use a dictionary of [`datasets.Dataset`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset) instances (or a [`datasets.DatasetDict`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict)) as the `train_dataset` and `eval_dataset`. 2. (Optional) Use a dictionary of loss functions mapping dataset names to losses if you want to use different losses for different datasets. Each training/evaluation batch will contain samples from only one of the datasets. 
The order in which batches are sampled from the multiple datasets is determined by the [`MultiDatasetBatchSamplers`](https://sbert.net/docs/package_reference/sentence_transformer/training_args.html#sentence_transformers.training_args.MultiDatasetBatchSamplers) enum, which can be passed to the [`SentenceTransformersTrainingArguments`](https://sbert.net/docs/package_reference/sentence_transformer/training_args.html#sentencetransformertrainingarguments) via `multi_dataset_batch_sampler`. The valid options are: - `MultiDatasetBatchSamplers.ROUND_ROBIN`: Samples from each dataset in a round-robin fashion until one is exhausted. This strategy may not use all samples from each dataset, but it ensures equal sampling from each dataset. - `MultiDatasetBatchSamplers.PROPORTIONAL` (default): Samples from each dataset proportionally to its size. This strategy ensures that all samples from each dataset are used, and larger datasets are sampled from more frequently. Multi-task training has proven to be highly effective. For instance, [Huang et al. 2024](https://arxiv.org/pdf/2405.06932) employed [`MultipleNegativesRankingLoss`](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss), [`CoSENTLoss`](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss), and a variation of [`MultipleNegativesRankingLoss`](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) without in-batch negatives and only hard negatives to achieve state-of-the-art performance on Chinese. They also applied [`MatryoshkaLoss`](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) to enable the model to produce [Matryoshka Embeddings](https://huggingface.co/blog/matryoshka). Here's an example of multi-dataset training: ```python from datasets import load_dataset from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer from sentence_transformers.losses import CoSENTLoss, MultipleNegativesRankingLoss, SoftmaxLoss # 1. Load a model to finetune model = SentenceTransformer("bert-base-uncased") # 2. Load several Datasets to train with # (anchor, positive) all_nli_pair_train = load_dataset("sentence-transformers/all-nli", "pair", split="train[:10000]") # (premise, hypothesis) + label all_nli_pair_class_train = load_dataset("sentence-transformers/all-nli", "pair-class", split="train[:10000]") # (sentence1, sentence2) + score all_nli_pair_score_train = load_dataset("sentence-transformers/all-nli", "pair-score", split="train[:10000]") # (anchor, positive, negative) all_nli_triplet_train = load_dataset("sentence-transformers/all-nli", "triplet", split="train[:10000]") # (sentence1, sentence2) + score stsb_pair_score_train = load_dataset("sentence-transformers/stsb", split="train[:10000]") # (anchor, positive) quora_pair_train = load_dataset("sentence-transformers/quora-duplicates", "pair", split="train[:10000]") # (query, answer) natural_questions_train = load_dataset("sentence-transformers/natural-questions", split="train[:10000]") # Combine all datasets into a dictionary with dataset names to datasets train_dataset = { "all-nli-pair": all_nli_pair_train, "all-nli-pair-class": all_nli_pair_class_train, "all-nli-pair-score": all_nli_pair_score_train, "all-nli-triplet": all_nli_triplet_train, "stsb": stsb_pair_score_train, "quora": quora_pair_train, "natural-questions": natural_questions_train, } # 3.
Load several Datasets to evaluate with # (anchor, positive, negative) all_nli_triplet_dev = load_dataset("sentence-transformers/all-nli", "triplet", split="dev") # (sentence1, sentence2, score) stsb_pair_score_dev = load_dataset("sentence-transformers/stsb", split="validation") # (anchor, positive) quora_pair_dev = load_dataset("sentence-transformers/quora-duplicates", "pair", split="train[10000:11000]") # (query, answer) natural_questions_dev = load_dataset("sentence-transformers/natural-questions", split="train[10000:11000]") # Use a dictionary for the evaluation dataset too, or just use one dataset or none at all eval_dataset = { "all-nli-triplet": all_nli_triplet_dev, "stsb": stsb_pair_score_dev, "quora": quora_pair_dev, "natural-questions": natural_questions_dev, } # 4. Load several loss functions to train with # (anchor, positive), (anchor, positive, negative) mnrl_loss = MultipleNegativesRankingLoss(model) # (sentence_A, sentence_B) + class softmax_loss = SoftmaxLoss(model) # (sentence_A, sentence_B) + score cosent_loss = CoSENTLoss(model) # Create a mapping with dataset names to loss functions, so the trainer knows which loss to apply where # Note: You can also just use one loss if all your training/evaluation datasets use the same loss losses = { "all-nli-pair": mnrl_loss, "all-nli-pair-class": softmax_loss, "all-nli-pair-score": cosent_loss, "all-nli-triplet": mnrl_loss, "stsb": cosent_loss, "quora": mnrl_loss, "natural-questions": mnrl_loss, } # 5. Define a simple trainer, although it's recommended to use one with args & evaluators trainer = SentenceTransformerTrainer( model=model, train_dataset=train_dataset, eval_dataset=eval_dataset, loss=losses, ) trainer.train() # 6. Save the trained model and optionally push it to the Hugging Face Hub model.save_pretrained("bert-base-all-nli-stsb-quora-nq") model.push_to_hub("bert-base-all-nli-stsb-quora-nq") ``` ## Deprecation Prior to the Sentence Transformer v3 release, all models would be trained using the [`SentenceTransformer.fit`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer.fit) method. Rather than deprecating this method, starting from v3.0, this method will use the [`SentenceTransformerTrainer`](https://sbert.net/docs/package_reference/sentence_transformer/trainer.html#sentence_transformers.trainer.SentenceTransformerTrainer) behind the scenes. This means that your old training code should still work, and should even be upgraded with the new features such as multi-gpu training, loss logging, etc. That said, the new training approach is much more powerful, so it is **recommended** to write new training scripts using the new approach. ## Additional Resources ### Training Examples The following pages contain training examples with explanations as well as links to code. 
We recommend that you browse through these to familiarize yourself with the training loop: * [Semantic Textual Similarity](https://sbert.net/examples/training/sts/README.html) * [Natural Language Inference](https://sbert.net/examples/training/nli/README.html) * [Paraphrases](https://sbert.net/examples/training/paraphrases/README.html) * [Quora Duplicate Questions](https://sbert.net/examples/training/quora_duplicate_questions/README.html) * [Matryoshka Embeddings](https://sbert.net/examples/training/matryoshka/README.html) * [Adaptive Layer Models](https://sbert.net/examples/training/adaptive_layer/README.html) * [Multilingual Models](https://sbert.net/examples/training/multilingual/README.html) * [Model Distillation](https://sbert.net/examples/training/distillation/README.html) * [Augmented Sentence Transformers](https://sbert.net/examples/training/data_augmentation/README.html) ### Documentation Additionally, the following pages may be useful to learn more about Sentence Transformers: * [Installation](https://sbert.net/docs/installation.html) * [Quickstart](https://sbert.net/docs/quickstart.html) * [Usage](https://sbert.net/docs/sentence_transformer/usage/usage.html) * [Pretrained Models](https://sbert.net/docs/sentence_transformer/pretrained_models.html) * [Training Overview](https://sbert.net/docs/sentence_transformer/training_overview.html) (This blogpost is a distillation of the Training Overiew documentation) * [Dataset Overview](https://sbert.net/docs/sentence_transformer/dataset_overview.html) * [Loss Overview](https://sbert.net/docs/sentence_transformer/loss_overview.html) * [API Reference](https://sbert.net/docs/package_reference/sentence_transformer/index.html) And lastly, here are some advanced pages that might interest you: * [Hyperparameter Optimization](https://sbert.net/examples/training/hpo/README.html) * [Distributed Training](https://sbert.net/docs/sentence_transformer/training/distributed.html)
1
0
hf_public_repos
hf_public_repos/blog/idefics2.md
--- title: "Introducing Idefics2: A Powerful 8B Vision-Language Model for the community" thumbnail: /blog/assets/idefics/thumbnail.png authors: - user: Leyo - user: HugoLaurencon - user: VictorSanh --- <p align="center"> <img src="https://huggingface.co/HuggingFaceM4/idefics-80b/resolve/main/assets/IDEFICS.png" alt="Idefics-Obelics logo" width="250" height="250"> </p> # Introducing Idefics2: A Powerful 8B Vision-Language Model for the community We are excited to release [Idefics2](https://huggingface.co/HuggingFaceM4/idefics2-8b), a general multimodal model that takes as input arbitrary sequences of texts and images, and generates text responses. It can answer questions about images, describe visual content, create stories grounded in multiple images, extract information from documents, and perform basic arithmetic operations. \ Idefics2 improves upon [Idefics1](https://huggingface.co/blog/idefics): with 8B parameters, an open license (Apache 2.0), and enhanced OCR (Optical Character Recognition) capabilities, Idefics2 is a strong foundation for the community working on multimodality. Its performance on Visual Question Answering benchmarks is top of its class size, and competes with much larger models such as [LLava-Next-34B](https://huggingface.co/liuhaotian/llava-v1.6-34b) and [MM1-30B-chat](https://huggingface.co/papers/2403.09611). \ Idefics2 is also integrated in 🤗 Transformers from the get-go and therefore is straightforward to finetune for many multimodal applications. You can try out the [models](https://huggingface.co/HuggingFaceM4/idefics2-8b) on the Hub right now! <p align="left"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/idefics2/Idefics2_eval_barchart.png?download=true" width="900" alt="The Cauldron"/> </p> | <nobr>Model</nobr> | <nobr>Open <br>weights</nobr> | <nobr>Size</nobr> | <nobr># tokens <br>per image</nobr> | <nobr>MMMU <br>(val/test)</nobr> | <nobr>MathVista <br>(testmini)</nobr> | <nobr>TextVQA <br>(val)</nobr> | <nobr>MMBench <br>(test)</nobr> | <nobr>VQAv2 <br>(test-dev)</nobr> | <nobr>DocVQA <br>(test)</nobr> | |--------------|-------------|------|--------------------|-----------|-----------|---------|---------|---------|---------| | [DeepSeek-VL](https://huggingface.co/deepseek-ai/deepseek-vl-7b-chat) | ✅ | 7B | 576 | 36.6/- | 36.1 | 64.4 | 73.2 | - | 49.6 | | [LLaVa-NeXT-Mistral-7B](https://huggingface.co/liuhaotian/llava-v1.6-mistral-7b) | ✅ | 7B | 2880 | 35.3/- | 37.7 | 65.7 | 68.7 | 82.2 | - | | [LLaVa-NeXT-13B](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-13b) | ✅ | 13B | 2880 | 36.2/- | 35.3 | 67.1 | 70.0 | 82.8 | - | | [LLaVa-NeXT-34B](https://huggingface.co/liuhaotian/llava-v1.6-34b) | ✅ | 34B | 2880 | 51.1/44.7 | 46.5 | 69.5 | 79.3 | 83.7 | - | - | | MM1-Chat-7B | ❌ | 7B | 720 | 37.0/35.6 | 35.9 | 72.8 | 72.3 | 82.8 | - | | MM1-Chat-30B | ❌ | 30B | 720 | 44.7/40.3 | 39.4 | 73.5 | 75.1 | 83.7 | | | Gemini 1.0 Pro | ❌ | 🤷‍♂️ | 🤷‍♂️ | 47.9/- | 45.2 | 74.6 | - | 71.2 | 88.1 | | Gemini 1.5 Pro | ❌ | 🤷‍♂️ | 🤷‍♂️ | 58.5/- | 52.1 | 73.5 | - | 73.2 | 86.5 | | Claude 3 Haiku | ❌ |🤷‍♂️ | 🤷‍♂️ | 50.2/- | 46.4 | - | - | - | 88.8 | | | | | | | | | | [Idefics1 instruct](https://huggingface.co/HuggingFaceM4/idefics-80b-instruct) (32-shots) | ✅ | 80B | - | - | - | 39.3 | - | 68.8 | - | | | | | | | | | | **Idefics2** (w/o im. split)* | ✅ | 8B | 64 | 43.5/37.9 | 51.6 | 70.4 | 76.8 | 80.8 | 67.3 | | **Idefics2** (w/ im. split)* | ✅ | 8B | 320 | 43.0/37.7 | 51.4 | 73.0 | 76.7 | 81.2 | 74.0 | \* w/ im. 
split: Following the strategy from SPHINX and LLaVa-NeXT, we allow for an optional sub-image splitting in 4. ## Training Data Idefics2 was trained on a mixture of openly available datasets for the pretraining: Interleaved webdocuments (Wikipedia,[OBELICS](https://huggingface.co/datasets/HuggingFaceM4/OBELICS)), image-caption pairs (Public Multimodal Dataset, LAION-COCO), OCR data ([PDFA (en)](https://huggingface.co/datasets/pixparse/pdfa-eng-wds), [IDL](https://huggingface.co/datasets/pixparse/idl-wds) and [Rendered-text](https://huggingface.co/datasets/wendlerc/RenderedText), and image-to-code data ([WebSight](https://huggingface.co/datasets/HuggingFaceM4/WebSight))). \ The [interactive visualization](https://atlas.nomic.ai/map/f2fba2aa-3647-4f49-a0f3-9347daeee499/ee4a84bd-f125-4bcc-a683-1b4e231cb10f) allows exploring the OBELICS dataset. \ Following common practices in the foundation model community, we further train the base model on task-oriented data. However, these data are often in disparate formats, and scattered in various places. Gathering them is a barrier for the community. To address that problem, we are releasing the multimodal instruction fine-tuning dataset we've been cooking: *[The Cauldron](https://huggingface.co/datasets/HuggingFaceM4/the_cauldron)*, an open compilation of **50** manually-curated datasets formatted for multi-turn conversations. We instruction fine-tuned Idefics2 on the concatenation of The Cauldron and various text-only instruction fine-tuning datasets. <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/idefics2/The_Cauldron.png?download=true" width="400" alt="The Cauldron"/> </p> ## Improvements over Idefics1 * We manipulate images in their native resolutions (up to 980 x 980) and native aspect ratios by following the NaViT strategy. That circumvents the need to resize images to fixed-size squares as it has been historically done in the computer vision community. Additionally, we follow the strategy from SPHINX and (optionally) allow sub-image splitting and passing images of very large resolution. * We significantly enhanced OCR abilities by integrating data that requires the model to transcribe text in an image or a document. We also improved abilities in answering questions on charts, figures, and documents with appropriate training data. * We departed from the Idefics1's architecture (gated cross-attentions) and simplified the integration of visual features into the language backbone. The images are fed to the vision encoder followed by a learned Perceiver pooling and an MLP modality projection. That pooled sequence is then concatenated with the text embeddings to obtain an (interleaved) sequence of image(s) and text(s). All of these improvements along with better pre-trained backbones yield a significant jump in performance over Idefics1 for a model that is 10x smaller. <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/idefics2/Idefics2_flowchart.png?download=true" alt="Idefics2 Architecture" width="250" height="350"> </p> ## Getting Started with Idefics2 Idefics2 is available on the Hugging Face Hub and supported in the last `transformers` version. 
Here is a code sample to try it out: ```python import requests import torch from PIL import Image from transformers import AutoProcessor, AutoModelForVision2Seq from transformers.image_utils import load_image DEVICE = "cuda:0" # Note that passing the image urls (instead of the actual pil images) to the processor is also possible image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg") image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg") image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg") processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b") model = AutoModelForVision2Seq.from_pretrained( "HuggingFaceM4/idefics2-8b", ).to(DEVICE) # Create inputs messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What do we see in this image?"}, ] }, { "role": "assistant", "content": [ {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."}, ] }, { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "And how about this image?"}, ] }, ] prompt = processor.apply_chat_template(messages, add_generation_prompt=True) inputs = processor(text=prompt, images=[image1, image2], return_tensors="pt") inputs = {k: v.to(DEVICE) for k, v in inputs.items()} # Generate generated_ids = model.generate(**inputs, max_new_tokens=500) generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True) print(generated_texts) ``` We also provide a fine-tuning [colab](https://colab.research.google.com/drive/1NtcTgRbSBKN7pYD3Vdx1j9m8pt3fhFDB?usp=sharing) which should come in handy for anyone looking to improve Idefics2 on specific use cases. <p align="left"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/idefics2/This_is_fine_example.png?download=true" width="1200" alt="The Cauldron"/> </p> ## Resources If you wish to deep dive further, here is the compilation of all resources for Idefics2: * [Idefics2 collection](https://huggingface.co/collections/HuggingFaceM4/idefics2-661d1971b7c50831dd3ce0fe) * [Idefics2 model with model card](https://huggingface.co/HuggingFaceM4/idefics2-8b) * [Idefics2-base model with model card](https://huggingface.co/HuggingFaceM4/idefics2-8b-base) * [Idefics2-chat model with model card](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty) * [The Cauldron with its dataset card](https://huggingface.co/datasets/HuggingFaceM4/the_cauldron) * [OBELICS with its dataset card](https://huggingface.co/datasets/HuggingFaceM4/OBELICS) * [WebSight with its dataset card](https://huggingface.co/datasets/HuggingFaceM4/WebSight) * [Idefics2 fine-tuning colab](https://colab.research.google.com/drive/1rm3AGquGEYXfeeizE40bbDtcWh5S4Nlq?usp=sharing) * [Idefics2-8B model demo (not the chatty model)](https://huggingface.co/spaces/HuggingFaceM4/idefics-8b) * [Idefics2 demo](https://huggingface.co/spaces/HuggingFaceM4/idefics2_playground) * [Idefics2 paper](https://arxiv.org/abs/2405.02246) ## License The model is built on top of two pre-trained models: [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) and [siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384). Both of them have been released under Apache-2.0 license. We release Idefics2 weights under an Apache-2.0 license as well. 
## Acknowledgments Thank you to the Google Team and Mistral AI for releasing and making their models available to the open-source AI community! Special thanks to Chun Te Lee for the barplot, and Merve Noyan for the review and suggestions on the blogpost 🤗
2
0
hf_public_repos
hf_public_repos/blog/chat-templates.md
--- title: "Chat Templates: An End to the Silent Performance Killer" thumbnail: /blog/assets/chat-templates/thumbnail.png authors: - user: rocketknight1 --- # Chat Templates > *A spectre is haunting chat models - the spectre of incorrect formatting!* ## tl;dr Chat models have been trained with very different formats for converting conversations into a single tokenizable string. Using a format different from the format a model was trained with will usually cause severe, silent performance degradation, so matching the format used during training is extremely important! Hugging Face tokenizers now have a `chat_template` attribute that can be used to save the chat format the model was trained with. This attribute contains a Jinja template that converts conversation histories into a correctly formatted string. Please see the [technical documentation](https://huggingface.co/docs/transformers/main/en/chat_templating) for information on how to write and apply chat templates in your code. ## Introduction If you're familiar with the 🤗 Transformers library, you've probably written code like this: ```python tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModel.from_pretrained(checkpoint) ``` By loading the tokenizer and model from the same checkpoint, you ensure that inputs are tokenized in the way the model expects. If you pick a tokenizer from a different model, the input tokenization might be completely different, and the result will be that your model's performance will be seriously damaged. The term for this is a **distribution shift** - the model has been learning data from one distribution (the tokenization it was trained with), and suddenly it has shifted to a completely different one. Whether you're fine-tuning a model or using it directly for inference, it's always a good idea to minimize these distribution shifts and keep the input you give it as similar as possible to the input it was trained on. With regular language models, it's relatively easy to do that - simply load your tokenizer and model from the same checkpoint, and you're good to go. With chat models, however, it's a bit different. This is because "chat" is not just a single string of text that can be straightforwardly tokenized - it's a sequence of messages, each of which contains a `role` as well as `content`, which is the actual text of the message. Most commonly, the roles are "user" for messages sent by the user, "assistant" for responses written by the model, and optionally "system" for high-level directives given at the start of the conversation. If that all seems a bit abstract, here's an example chat to make it more concrete: ```python [ {"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": "Nice to meet you!"} ] ``` This sequence of messages needs to be converted into a text string before it can be tokenized and used as input to a model. The problem, though, is that there are many ways to do this conversion! You could, for example, convert the list of messages into an "instant messenger" format: ``` User: Hey there! Bot: Nice to meet you! ``` Or you could add special tokens to indicate the roles: ``` [USER] Hey there! [/USER] [ASST] Nice to meet you! [/ASST] ``` Or you could add tokens to indicate the boundaries between messages, but insert the role information as a string: ``` <|im_start|>user Hey there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> ``` There are lots of ways to do this, and none of them is obviously the best or correct way to do it. 
As a result, different models have been trained with wildly different formatting. I didn't make these examples up; they're all real and being used by at least one active model! But once a model has been trained with a certain format, you really want to ensure that future inputs use the same format, or else you could get a performance-destroying distribution shift. ## Templates: A way to save format information Right now, if you're lucky, the format you need is correctly documented somewhere in the model card. If you're unlucky, it isn't, so good luck if you want to use that model. In extreme cases, we've even put the whole prompt format in [a blog post](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) to ensure that users don't miss it! Even in the best-case scenario, though, you have to locate the template information and manually code it up in your fine-tuning or inference pipeline. We think this is an especially dangerous issue because using the wrong chat format is a **silent error** - you won't get a loud failure or a Python exception to tell you something is wrong, the model will just perform much worse than it would have with the right format, and it'll be very difficult to debug the cause! This is the problem that **chat templates** aim to solve. Chat templates are [Jinja template strings](https://jinja.palletsprojects.com/en/3.1.x/) that are saved and loaded with your tokenizer, and that contain all the information needed to turn a list of chat messages into a correctly formatted input for your model. Here are three chat template strings, corresponding to the three message formats above: ```jinja {% for message in messages %} {% if message['role'] == 'user' %} {{ "User : " }} {% else %} {{ "Bot : " }} {% endif %} {{ message['content'] + '\n' }} {% endfor %} ``` ```jinja {% for message in messages %} {% if message['role'] == 'user' %} {{ "[USER] " + message['content'] + " [/USER]" }} {% else %} {{ "[ASST] " + message['content'] + " [/ASST]" }} {% endif %} {{ '\n' }} {% endfor %} ``` ```jinja "{% for message in messages %}" "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}" "{% endfor %}" ``` If you're unfamiliar with Jinja, I strongly recommend that you take a moment to look at these template strings, and their corresponding template outputs, and see if you can convince yourself that you understand how the template turns a list of messages into a formatted string! The syntax is very similar to Python in a lot of ways. ## Why templates? Although Jinja can be confusing at first if you're unfamiliar with it, in practice we find that Python programmers can pick it up quickly. During development of this feature, we considered other approaches, such as a limited system to allow users to specify per-role prefixes and suffixes for messages. We found that this could become confusing and unwieldy, and was so inflexible that hacky workarounds were needed for several models. Templating, on the other hand, is powerful enough to cleanly support all of the message formats that we're aware of. ## Why bother doing this? Why not just pick a standard format? This is an excellent idea! Unfortunately, it's too late, because multiple important models have already been trained with very different chat formats. However, we can still mitigate this problem a bit. We think the closest thing to a 'standard' for formatting is the [ChatML format](https://github.com/openai/openai-python/blob/main/chatml.md) created by OpenAI.
If you're training a new model for chat, and this format is suitable for you, we recommend using it and adding special `<|im_start|>` and `<|im_end|>` tokens to your tokenizer. It has the advantage of being very flexible with roles, as the role is just inserted as a string rather than having specific role tokens. If you'd like to use this one, it's the third of the templates above, and you can set it with this simple one-liner: ```py tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}" ``` There's also a second reason not to hardcode a standard format, though, beyond the proliferation of existing formats - we expect that templates will be broadly useful in preprocessing for many types of models, including those that might be doing very different things from standard chat. Hardcoding a standard format limits the ability of model developers to use this feature to do things we haven't even thought of yet, whereas templating gives users and developers maximum freedom. It's even possible to encode checks and logic in templates, which is a feature we don't use extensively in any of the default templates, but which we expect to have enormous power in the hands of adventurous users. We strongly believe that the open-source ecosystem should enable you to do what you want, not dictate to you what you're permitted to do. ## How do templates work? Chat templates are part of the **tokenizer**, because they fulfill the same role as tokenizers do: They store information about how data is preprocessed, to ensure that you feed data to the model in the same format that it saw during training. We have designed it to be very easy to add template information to an existing tokenizer and save it or upload it to the Hub. Before chat templates, chat formatting information was stored at the **class level** - this meant that, for example, all LLaMA checkpoints would get the same chat formatting, using code that was hardcoded in `transformers` for the LLaMA model class. For backward compatibility, model classes that had custom chat format methods have been given **default chat templates** instead. Default chat templates are also set at the class level, and tell classes like `ConversationPipeline` how to format inputs when the model does not have a chat template. We're doing this **purely for backwards compatibility** - we highly recommend that you explicitly set a chat template on any chat model, even when the default chat template is appropriate. This ensures that any future changes or deprecations in the default chat template don't break your model. Although we will be keeping default chat templates for the foreseeable future, we hope to transition all models to explicit chat templates over time, at which point the default chat templates may be removed entirely. For information about how to set and apply chat templates, please see the [technical documentation](https://huggingface.co/docs/transformers/main/en/chat_templating). ## How do I get started with templates? Easy! If a tokenizer has the `chat_template` attribute set, it's ready to go. You can use that model and tokenizer in `ConversationPipeline`, or you can call `tokenizer.apply_chat_template()` to format chats for inference or training. 
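For example, here is a minimal sketch of both calls; the checkpoint is purely illustrative, and any chat model whose tokenizer ships with a `chat_template` will behave the same way:

```python
from transformers import AutoTokenizer

# An illustrative chat model whose tokenizer includes a chat_template
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

chat = [
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Nice to meet you!"},
    {"role": "user", "content": "Can you summarize our conversation so far?"},
]

# Render the conversation as a string using the model's own template
formatted = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
print(formatted)

# Or tokenize directly, ready to pass to model.generate()
input_ids = tokenizer.apply_chat_template(chat, add_generation_prompt=True, return_tensors="pt")
```

Setting `add_generation_prompt=True` appends the tokens that tell the model a new assistant response should begin, which is what you usually want at inference time.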
Please see our [developer guide](https://huggingface.co/docs/transformers/main/en/chat_templating) or the [apply_chat_template documentation](https://huggingface.co/docs/transformers/main/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.apply_chat_template) for more! If a tokenizer doesn't have a `chat_template` attribute, it might still work, but it will use the default chat template set for that model class. This is fragile, as we mentioned above, and it's also a source of silent bugs when the class template doesn't match what the model was actually trained with. If you want to use a checkpoint that doesn't have a `chat_template`, we recommend checking docs like the model card to verify what the right format is, and then adding a correct `chat_template`for that format. We recommend doing this even if the default chat template is correct - it future-proofs the model, and also makes it clear that the template is present and suitable. You can add a `chat_template` even for checkpoints that you're not the owner of, by opening a [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions). The only change you need to make is to set the `tokenizer.chat_template` attribute to a Jinja template string. Once that's done, push your changes and you're ready to go! If you'd like to use a checkpoint for chat but you can't find any documentation on the chat format it used, you should probably open an issue on the checkpoint or ping the owner! Once you figure out the format the model is using, please open a pull request to add a suitable `chat_template`. Other users will really appreciate it! ## Conclusion: Template philosophy We think templates are a very exciting change. In addition to resolving a huge source of silent, performance-killing bugs, we think they open up completely new approaches and data modalities. Perhaps most importantly, they also represent a philosophical shift: They take a big function out of the core `transformers` codebase and move it into individual model repos, where users have the freedom to do weird and wild and wonderful things. We're excited to see what uses you find for them!
3
0
hf_public_repos
hf_public_repos/blog/packing-with-FA2.md
--- title: "Improving Hugging Face Training Efficiency Through Packing with Flash Attention 2" thumbnail: /blog/assets/packing-with-FA2/thumbnail.png authors: - user: RQlee guest: true org: ibm - user: ArthurZ - user: achikundu guest: true org: ibm - user: lwtr guest: true org: ibm - user: rganti guest: true org: ibm - user: mayank-mishra guest: true org: ibm --- ## TL;DR Training with packed instruction tuning examples (without padding) is now compatible with Flash Attention 2 in Hugging Face, thanks to a [recent PR](https://github.com/huggingface/transformers/pull/31629) and the new [DataCollatorWithFlattening](https://huggingface.co/docs/transformers/main/en/main_classes/data_collator#transformers.DataCollatorWithFlattening) It can provide up to 2x improvement in training throughput while maintaining convergence quality. Read on for the details! ## Introduction Padding input sequences in mini-batches is a usual method to collate inputs during training. However, this introduces inefficiencies because of the irrelevant padding tokens. Packing examples without padding, and using the token position information, is a more efficient alternative. However, previous implementations of packing did not consider example boundaries when using Flash Attention 2, resulting in undesired cross-example attention that reduce quality and convergence. Hugging Face Transformers now addresses this with a new feature that maintains boundary awareness during packing, alongside the introduction of a new data collator, `DataCollatorWithFlattening`. By selecting `DataCollatorWithFlattening`, Hugging Face `Trainer` users can now seamlessly concatenate sequences into a single tensor while accounting for sequence boundaries during Flash Attention 2 computations. This is achieved through the `flash_attn_varlen_func`, which calculates the cumulative sequence lengths in each mini-batch (`cu_seqlens`). The same feature is available to Hugging Face `SFTTrainer` users in the `TRL` library by setting a new flag, `padding_free=True`, when calling the data collator `DataCollatorForCompletionOnlyLM`. ## Up to 2x throughput increase We see significant improvement in training throughput using this feature with the new `DataCollatorWithFlattening`. The figure below shows the throughput measured in tokens/second during training. In this example, the throughput is the per-GPU average over 8 A100-80 GPU over one epoch of a 20K randomly selected sample from two different instruct tuning datasets, FLAN and OrcaMath. ![throughput](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/packing-with-FA2/thruput.png) FLAN has short sequences on average but a large variance in sequence length, so example lengths in each batch may vary widely. This means that padded FLAN batches may incur a significant overhead in unused padding tokens. Training on the FLAN dataset shows a significant benefit using the new `DataCollatorWithFlattening` in terms of increased throughput. We see a 2x throughput increase on the models shown here: llama2-7B, mistral-7B, and granite-8B-code. OrcaMath has longer examples and a lower variance in example length. As such, the improvement from packing is lower. Our experiments show a 1.4x increase in throughput when training using this form of packing on the OrcaMath dataset across these three models. 
![memory](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/packing-with-FA2/memory.png) Memory usage also improves through packing with the new `DataCollatorWithFlattening`. The following figure shows the peak memory usage of the same three models training on the same two datasets. Peak memory is reduced by 20% on the FLAN dataset, which benefits considerably from packing. Peak memory reduction is 6% on the OrcaMath dataset with its more homogeneous example lengths. Packing examples, when it reduces the number of optimization steps, may harm training convergence. The new feature, however, retains the minibatches and, hence, the same number of optimization steps as would be used with padded examples. Thus, there is no impact on train convergence, as we see in the next figure, which shows identical validation loss of the same three models training on the same two datasets, whether the models are trained with packing using the new `DataCollatorWithFlattening` or with padding. ![ValLoss](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/packing-with-FA2/ValLoss.png) ## How it works Consider a batch of data with a batchsize = 4 where the four sequences are as follows: ![batch](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/packing-with-FA2/four_sequences.png) After concatenating the examples, the padding-free collator returns the `input_ids`, `labels`, and `position_ids` of each example. Hence, the collator provides, for this batch of data, ![example](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/packing-with-FA2/input_ids_labels_position_ids.png) The modifications required are lightweight and are limited to providing the `position_ids` to Flash Attention 2. This relies, however, on the model exposing `position_ids`. As of the time of writing, 14 models expose them and are supported by the solution. Specifically, Llama 2 and 3, Mistral, Mixtral, Granite, DBRX, Falcon, Gemma, OLMo, Phi 1, 2, and 3, phi3, Qwen 2 and 2 MoE, StableLM, and StarCoder 2 are all supported by the solution. ## Getting started Reaping the benefits of packing with `position_ids` is easy. If you are using Hugging Face `Trainer` from `Transformers`, only two steps are required: 1) Instantiate the model with Flash Attention 2 2) Use the new `DataCollatorWithFlattening` If you are using Hugging Face `SFTTrainer` from `TRL` with `DataCollatorForCompletionOnlyLM`, then the two required steps are: 1) Instantiate the model with Flash Attention 2 2) Set `padding_free=True` when calling `DataCollatorForCompletionOnlyLM` as follows: `collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer, padding_free=True)` ## How to use it For `Trainer` users, the example below illustrates how to use the new feature. 
```Python # Example using DataCollatorWithFlattening import torch # load model as usual from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained( "instructlab/merlinite-7b-lab", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) # read dataset as usual from datasets import load_dataset train_dataset = load_dataset("json", data_files="path/to/my/dataset")["train"] # use DataCollatorWithFlattening from transformers import DataCollatorWithFlattening data_collator = DataCollatorWithFlattening() # train from transformers import TrainingArguments, Trainer train_args = TrainingArguments(output_dir="/save/path") trainer = Trainer( args=train_args, model=model, train_dataset=train_dataset, data_collator=data_collator ) trainer.train() ``` For `TRL` users, the example below shows how to use the new feature with `SFTTrainer`. ```Python # SFTTrainer example using DataCollatorForCompletionOnlyLM import torch from transformers import AutoModelForCausalLM, AutoTokenizer from datasets import load_dataset from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM dataset = load_dataset("lucasmccabe-lmi/CodeAlpaca-20k", split="train") model = AutoModelForCausalLM.from_pretrained( "instructlab/merlinite-7b-lab", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2") tokenizer = AutoTokenizer.from_pretrained("instructlab/merlinite-7b-lab") tokenizer.pad_token = tokenizer.eos_token def formatting_prompts_func(example): output_texts = [] for i in range(len(example['instruction'])): text = f"### Question: {example['instruction'][i]}\n ### Answer: {example['output'][i]}" output_texts.append(text) return output_texts response_template = " ### Answer:" response_template_ids = tokenizer.encode(response_template, add_special_tokens=False)[2:] collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer, padding_free=True) trainer = SFTTrainer( model, train_dataset=dataset, args=SFTConfig( output_dir="./tmp", gradient_checkpointing=True, per_device_train_batch_size=8 ), formatting_func=formatting_prompts_func, data_collator=collator, ) trainer.train() ``` ## Conclusions Packing instruction tuning examples, instead of padding, is now fully compatible with Flash Attention 2, thanks to a recent PR and the new `DataCollatorWithFlattening`. The method is compatible with models that use `position_ids`. Benefits can be seen in throughput and peak memory usage during training, with no degradation in training convergence. Actual throughput and memory improvement depends on the model and the distribution of example lengths in the training data. Training with data that has a wide variation of example lengths will see the greatest benefit, with respect to padding, by using the `DataCollatorWithFlattening`. The same feature is available to `SFTTrainer` users in the `TRL` library by setting a new flag, `padding_free=True`, when calling `DataCollatorForCompletionOnlyLM`. For a more detailed analysis, have a look at the paper at https://huggingface.co/papers/2407.09105
4
0
hf_public_repos
hf_public_repos/blog/stable-diffusion-finetuning-intel.md
--- title: "Fine-tuning Stable Diffusion models on Intel CPUs" thumbnail: /blog/assets/stable-diffusion-finetuning-intel/01.png authors: - user: juliensimon --- # Fine-tuning Stable Diffusion Models on Intel CPUs Diffusion models helped popularize generative AI thanks to their uncanny ability to generate photorealistic images from text prompts. These models have now found their way into enterprise use cases like synthetic data generation or content creation. The Hugging Face hub includes over 5,000 pre-trained text-to-image [models](https://huggingface.co/models?pipeline_tag=text-to-image&sort=trending). Combining them with the [Diffusers library](https://huggingface.co/docs/diffusers/index), it's never been easier to start experimenting and building image generation workflows. Like Transformer models, you can fine-tune Diffusion models to help them generate content that matches your business needs. Initially, fine-tuning was only possible on GPU infrastructure, but things are changing! A few months ago, Intel [launched](https://www.intel.com/content/www/us/en/newsroom/news/4th-gen-xeon-scalable-processors-max-series-cpus-gpus.html#gs.2d6cd7) the fourth generation of Xeon CPUs, code-named Sapphire Rapids. Sapphire Rapids introduces the Intel Advanced Matrix Extensions (AMX), a new hardware accelerator for deep learning workloads. We've already demonstrated the benefits of AMX in several blog posts: [fine-tuning NLP Transformers](https://huggingface.co/blog/intel-sapphire-rapids), [inference with NLP Transformers](https://huggingface.co/blog/intel-sapphire-rapids-inference), and [inference with Stable Diffusion models](https://huggingface.co/blog/stable-diffusion-inference-intel). This post will show you how to fine-tune a Stable Diffusion model on an Intel Sapphire Rapids CPU cluster. We will use [textual inversion](https://huggingface.co/docs/diffusers/training/text_inversion), a technique that only requires a small number of example images. We'll use only five! Let's get started. ## Setting up the cluster Our friends at [Intel](https://huggingface.co/intel) provided four servers hosted on the [Intel Developer Cloud](https://www.intel.com/content/www/us/en/developer/tools/devcloud/services.html) (IDC), a service platform for developing and running workloads in Intel®-optimized deployment environments with the latest Intel processors and [performance-optimized software stacks](https://www.intel.com/content/www/us/en/developer/topic-technology/artificial-intelligence/overview.html). Each server is powered by two Intel Sapphire Rapids CPUs with 56 physical cores and 112 threads. 
Here's the output of `lscpu`: ``` Architecture: x86_64 CPU op-mode(s): 32-bit, 64-bit Address sizes: 52 bits physical, 57 bits virtual Byte Order: Little Endian CPU(s): 224 On-line CPU(s) list: 0-223 Vendor ID: GenuineIntel Model name: Intel(R) Xeon(R) Platinum 8480+ CPU family: 6 Model: 143 Thread(s) per core: 2 Core(s) per socket: 56 Socket(s): 2 Stepping: 8 CPU max MHz: 3800.0000 CPU min MHz: 800.0000 BogoMIPS: 4000.00 Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_per fmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cat_l2 cdp_l3 invpcid_single intel_ppin cdp_l2 ssbd mba ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm rdt_a avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb intel_pt avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local split_lock_detect avx_vnni avx512_bf16 wbnoinvd dtherm ida arat pln pts hwp hwp_act_window hwp_epp hwp_pkg_req avx512vbmi umip pku ospke waitpkg avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg tme avx512_vpopcntdq la57 rdpid bus_lock_detect cldemote movdiri movdir64b enqcmd fsrm md_clear serialize tsxldtrk pconfig arch_lbr amx_bf16 avx512_fp16 amx_tile amx_int8 flush_l1d arch_capabilities ``` Let's first list the IP addresses of our servers in `nodefile.` The first line refers to the primary server. ``` cat << EOF > nodefile 192.168.20.2 192.168.21.2 192.168.22.2 192.168.23.2 EOF ``` Distributed training requires password-less `ssh` between the primary and other nodes. Here's a good [article](https://www.redhat.com/sysadmin/passwordless-ssh) on how to do this if you're unfamiliar with the process. Next, we create a new environment on each node and install the software dependencies. We notably install two Intel libraries: [oneCCL](https://github.com/oneapi-src/oneCCL), to manage distributed communication and the [Intel Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch) (IPEX) to leverage the hardware acceleration features present in Sapphire Rapids. We also add `gperftools` to install `libtcmalloc,` a high-performance memory allocation library. ``` conda create -n diffuser python==3.9 conda activate diffuser pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu pip3 install transformers accelerate==0.19.0 pip3 install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable-cpu pip3 install intel_extension_for_pytorch conda install gperftools -c conda-forge -y ``` Next, we clone the [diffusers](https://github.com/huggingface/diffusers/) repository on each node and install it from source. ``` git clone https://github.com/huggingface/diffusers.git cd diffusers pip install . ``` Next, we add IPEX to the fine-tuning script in `diffusers/examples/textual_inversion`. We import IPEX and optimize the U-Net and Variable Auto Encoder models. Please make sure this is applied to all nodes. 
``` diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index 4a193abc..91c2edd1 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -765,6 +765,10 @@ def main(): unet.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) + import intel_extension_for_pytorch as ipex + unet = ipex.optimize(unet, dtype=weight_dtype) + vae = ipex.optimize(vae, dtype=weight_dtype) + # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: ``` The last step is downloading the [training images](https://huggingface.co/sd-concepts-library/dicoo). Ideally, we'd use a shared NFS folder, but for the sake of simplicity, we'll download the images on each node. Please ensure they're in the same directory on all nodes (`/home/devcloud/dicoo`). ``` mkdir /home/devcloud/dicoo cd /home/devcloud/dicoo wget https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/0.jpeg wget https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/1.jpeg wget https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/2.jpeg wget https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/3.jpeg wget https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/4.jpeg ``` Here are the images: <img src="https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/0.jpeg" height="256"> <img src="https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/1.jpeg" height="256"> <img src="https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/2.jpeg" height="256"> <img src="https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/3.jpeg" height="256"> <img src="https://huggingface.co/sd-concepts-library/dicoo/resolve/main/concept_images/4.jpeg" height="256"> The system setup is now complete. Let's configure the training job. ## Configuring the fine-tuning job The [Accelerate](https://huggingface.co/docs/accelerate/index) library makes it very easy to run distributed training. We need to run it on each node and answer simple questions. Here's a screenshot for the primary node. On the other nodes, you need to set the rank to 1, 2, and 3. All other answers are identical. <kbd> <img src="assets/stable-diffusion-finetuning-intel/screen01.png"> </kbd> Finally, we need to set the environment on the primary node. It will be propagated to other nodes as the fine-tuning job starts. The first line sets the name of the network interface connected to the local network where all nodes run. You may need to adapt this using`ifconfig` to get the appropriate information. ``` export I_MPI_HYDRA_IFACE=ens786f1 oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)") source $oneccl_bindings_for_pytorch_path/env/setvars.sh export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so export CCL_ATL_TRANSPORT=ofi export CCL_WORKER_COUNT=1 export MODEL_NAME="runwayml/stable-diffusion-v1-5" export DATA_DIR="/home/devcloud/dicoo" ``` We can now launch the fine-tuning job. 
## Fine-tuning the model We launch the fine-tuning job with `mpirun`, which sets up distributed communication across the nodes listed in `nodefile`. We'll run 16 tasks (`-n`) with four tasks per node (`-ppn`). `Accelerate` automatically sets up distributed training across all tasks. Here, we train for 200 steps, which should take about five minutes. ``` mpirun -f nodefile -n 16 -ppn 4 \ accelerate launch diffusers/examples/textual_inversion/textual_inversion.py \ --pretrained_model_name_or_path=$MODEL_NAME --train_data_dir=$DATA_DIR \ --learnable_property="object" --placeholder_token="<dicoo>" --initializer_token="toy" \ --resolution=512 --train_batch_size=1 --seed=7 --gradient_accumulation_steps=1 \ --max_train_steps=200 --learning_rate=2.0e-03 --scale_lr --lr_scheduler="constant" \ --lr_warmup_steps=0 --output_dir=./textual_inversion_output --mixed_precision bf16 \ --save_as_full_pipeline ``` Here's a screenshot of the busy cluster: <kbd> <img src="assets/stable-diffusion-finetuning-intel/screen02.png"> </kbd> ## Troubleshooting Distributed training can be tricky, especially if you're new to the discipline. A minor misconfiguration on a single node is the most likely issue: missing dependency, images stored in a different location, etc. You can quickly pinpoint the troublemaker by logging in to each node and training locally. First, set the same environment as on the primary node, then run: ``` python diffusers/examples/textual_inversion/textual_inversion.py \ --pretrained_model_name_or_path=$MODEL_NAME --train_data_dir=$DATA_DIR \ --learnable_property="object" --placeholder_token="<dicoo>" --initializer_token="toy" \ --resolution=512 --train_batch_size=1 --seed=7 --gradient_accumulation_steps=1 \ --max_train_steps=200 --learning_rate=2.0e-03 --scale_lr --lr_scheduler="constant" \ --lr_warmup_steps=0 --output_dir=./textual_inversion_output --mixed_precision bf16 \ --save_as_full_pipeline ``` If training starts successfully, stop it and move to the next node. If training starts successfully on all nodes, return to the primary node and double-check the node file, the environment, and the `mpirun` command. Don't worry; you'll find the problem :) ## Generating images with the fine-tuned model After 5 minutes training, the model is saved locally. We could load it with a vanilla `diffusers` pipeline and predict. Instead, let's use [Optimum Intel and OpenVINO](https://huggingface.co/docs/optimum/intel/inference) to optimize the model. As discussed in a [previous post](https://huggingface.co/blog/intel-sapphire-rapids-inference), this lets you generate an image on a single CPU in less than 5 seconds! ``` pip install optimum[openvino] ``` Here, we load the model, optimize it for a static shape, and save it: ``` from optimum.intel.openvino import OVStableDiffusionPipeline model_id = "./textual_inversion_output" ov_pipe = OVStableDiffusionPipeline.from_pretrained(model_id, export=True) ov_pipe.reshape(batch_size=5, height=512, width=512, num_images_per_prompt=1) ov_pipe.save_pretrained("./textual_inversion_output_ov") ``` Then, we load the optimized model, generate five different images and save them: ``` from optimum.intel.openvino import OVStableDiffusionPipeline model_id = "./textual_inversion_output_ov" ov_pipe = OVStableDiffusionPipeline.from_pretrained(model_id, num_inference_steps=20) prompt = ["a yellow <dicoo> robot at the beach, high quality"]*5 images = ov_pipe(prompt).images print(images) for idx,img in enumerate(images): img.save(f"image{idx}.png") ``` Here's a generated image. 
It is impressive that the model only needed five images to learn that dicoos have glasses! <kbd> <img src="assets/stable-diffusion-finetuning-intel/dicoo_image_200.png"> </kbd> If you'd like, you can fine-tune the model some more. Here's a lovely example generated by a 3,000-step model (about an hour of training). <kbd> <img src="assets/stable-diffusion-finetuning-intel/dicoo_image.png"> </kbd> ## Conclusion Thanks to Hugging Face and Intel, you can now use Xeon CPU servers to generate high-quality images adapted to your business needs. They are generally more affordable and widely available than specialized hardware such as GPUs. Xeon CPUs can also be easily repurposed for other production tasks, from web servers to databases, making them a versatile and flexible choice for your IT infrastructure. Here are some resources to help you get started: * Diffusers [documentation](https://huggingface.co/docs/diffusers) * Optimum Intel [documentation](https://huggingface.co/docs/optimum/main/en/intel/inference) * [Intel IPEX](https://github.com/intel/intel-extension-for-pytorch) on GitHub * [Developer resources](https://www.intel.com/content/www/us/en/developer/partner/hugging-face.html) from Intel and Hugging Face. * Sapphire Rapids servers on [Intel Developer Cloud](https://www.intel.com/content/www/us/en/developer/tools/devcloud/services.html), [AWS](https://aws.amazon.com/about-aws/whats-new/2022/11/introducing-amazon-ec2-r7iz-instances/?nc1=h_ls) and [GCP](https://cloud.google.com/blog/products/compute/c3-machine-series-on-intel-sapphire-rapids-now-ga). If you have questions or feedback, we'd love to read them on the [Hugging Face forum](https://discuss.huggingface.co/). Thanks for reading!
5
0
hf_public_repos
hf_public_repos/blog/assisted-generation-support-gaudi.md
--- title: "Faster assisted generation support for Intel Gaudi" thumbnail: /blog/assets/assisted-generation-support-gaudi/thumbnail.png authors: - user: haimbarad guest: true org: Intel - user: neharaste guest: true org: Intel - user: joeychou guest: true org: Intel --- # Faster assisted generation support for Intel Gaudi As model sizes grow, Generative AI implementations require significant inference resources. This not only increases the cost per generation, but also increases the power consumption used to serve such requests. Inference optimizations for text generation are essential for reducing latency, infrastructure costs, and power consumption. This can lead to an improved user experience and increased efficiency in text generation tasks. Assisted decoding is a popular method for speeding up text generation. We adapted and optimized it for Intel Gaudi, which delivers similar performance as Nvidia H100 GPUs as shown in [a previous post](https://huggingface.co/blog/bridgetower), while its price is in the same ballpark as Nvidia A100 80GB GPUs. This work is now part of Optimum Habana, which extends various Hugging Face libraries like Transformers and Diffusers so that your AI workflows are fully optimized for Intel Gaudi processors. ## Speculative Sampling - Assisted Decoding Speculative sampling is a technique used to speed up text generation. It works by generating a draft model that generates K tokens, which are then evaluated in the target model. If the draft model is rejected, the target model is used to generate the next token. This process repeats. By using speculative sampling, we can improve the speed of text generation and achieve similar sampling quality as autoregressive sampling. The technique allows us to specify a draft model when generating text. This method has been shown to provide speedups of about 2x for large transformer-based models. Overall, these techniques can accelerate text generation and improve performance on Intel Gaudi processors. However, the draft model and target model have different sizes that would be represented in a KV cache, so the challenge is to take advantage of separate optimization strategies simultaneously. For this article, we assume a quantized model and leverage KV caching together with Speculative Sampling. Note that each model has its own KV cache, and the draft model is used to generate K tokens, which are then evaluated in the target model. The target model is used to generate the next token when the draft model is rejected. The draft model is used to generate the next K tokens, and the process repeats. Note that the authors [2] prove that the target distribution is recovered when performing speculative sampling - this guarantees the same sampling quality as autoregressive sampling on the target itself. Therefore, the situations where not leveraging speculative sampling is not worthwhile have to do with the case where there are not enough savings in the relative size of the draft model or the acceptance rate of the draft model is not high enough to benefit from the smaller size of the draft model. There is a technique similar to Speculative Sampling, known as Assisted Generation. This was developed independently around the same time [3]. The author integrated this method into Hugging Face Transformers, and the *.generate()* call now has an optional *assistant_model* parameter to enable this method. ## Usage & Experiments The usage of Assisted Generation is straightforward. 
An example is provided [here](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation#run-speculative-sampling-on-gaudi). As would be expected, the parameter `--assistant_model` is used to specify the draft model; the draft and target models then interact exactly as described above. The acceptance rate of the draft model is partly dependent on the input text. Typically, we have seen speed-ups of about 2x for large transformer-based models. ## Conclusion Assisted generation is now supported on Intel Gaudi and easy to use, providing a straightforward way to accelerate text generation on these processors. The method is based on Speculative Sampling, which has been shown to be effective in improving performance on large transformer-based models. # References [1] N. Shazeer, “Fast Transformer Decoding: One Write-Head is All You Need,” Nov. 2019. arXiv:1911.02150. [2] C. Chen, S. Borgeaud, G. Irving, J.B. Lespiau, L. Sifre, and J. Jumper, “Accelerating Large Language Model Decoding with Speculative Sampling,” Feb. 2023. arXiv:2302.01318. [3] J. Gante, “Assisted Generation: a new direction toward low-latency text generation,” May 2023, https://huggingface.co/blog/assisted-generation.
hf_public_repos/blog/regions.md
--- title: "Introducing Storage Regions on the HF Hub" thumbnail: /blog/assets/172_regions/thumbnail.png authors: - user: coyotte508 - user: rtrm - user: XciD - user: michellehbn - user: violette - user: julien-c --- # Introducing Storage Regions on the Hub As part of our [Enterprise Hub](https://huggingface.co/enterprise) plan, we recently released support for **Storage Regions**. Regions let you decide where your org's models and datasets will be stored. This has two main benefits, which we'll briefly go over in this blog post: - **Regulatory and legal compliance**, and more generally, better digital sovereignty - **Performance** (improved download and upload speeds and latency) Currently we support the following regions: - US 🇺🇸 - EU 🇪🇺 - coming soon: Asia-Pacific 🌏 But first, let's see how to setup this feature in your organization's settings 🔥 ## Org settings If your organization is not an Enterprise Hub org yet, you will see the following screen: ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/storage-regions/no-feature.png) As soon as you subscribe, you will be able to see the Regions settings page: ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/storage-regions/feature-annotated.png) On that page you can see: - an audit of where your orgs' repos are currently located - dropdowns to select where your repos will be created ## Repository Tag Any repo (model or dataset) stored in a non-default location will display its Region directly as a tag. That way your organization's members can see at a glance where repos are located. ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/storage-regions/tag-on-repo.png) ## Regulatory and legal compliance In many regulated industries, you may have a requirement to store your data in a specific area. For companies in the EU, that means you can use the Hub to build ML in a GDPR compliant way: with datasets, models and inference endpoints all stored within EU data centers. If you are an Enterprise Hub customer and have further questions about this, please get in touch! ## Performance Storing your models or your datasets closer to your team and infrastructure also means significantly improved performance, for both uploads and downloads. This makes a big difference considering model weights and dataset files are usually very large. ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/storage-regions/upload-speed.png) As an example, if you are located in Europe and store your repositories in the EU region, you can expect to see ~4-5x faster upload and download speeds vs. if they were stored in the US.
hf_public_repos/blog/image-similarity.md
--- title: Image Similarity with Hugging Face Datasets and Transformers thumbnail: /blog/assets/image_similarity/thumbnail.png authors: - user: sayakpaul --- # Image Similarity with Hugging Face Datasets and Transformers <a target="_blank" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_similarity.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> In this post, you'll learn to build an image similarity system with 🤗 Transformers. Finding out the similarity between a query image and potential candidates is an important use case for information retrieval systems, such as reverse image search, for example. All the system is trying to answer is that, given a _query_ image and a set of _candidate_ images, which images are the most similar to the query image. We'll leverage the [🤗 `datasets` library](https://huggingface.co/docs/datasets/) as it seamlessly supports parallel processing which will come in handy when building this system. Although the post uses a ViT-based model ([`nateraw/vit-base-beans`](https://huggingface.co/nateraw/vit-base-beans)) and a particular dataset ([Beans](https://huggingface.co/datasets/beans)), it can be extended to use other models supporting vision modality and other image datasets. Some notable models you could try: * [Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin) * [ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext) * [RegNet](https://huggingface.co/docs/transformers/model_doc/regnet) Also, the approach presented in the post can potentially be extended to other modalities as well. To study the fully working image-similarity system, you can refer to the Colab Notebook linked at the beginning. ## How do we define similarity? To build this system, we first need to define how we want to compute the similarity between two images. One widely popular practice is to compute dense representations (embeddings) of the given images and then use the [cosine similarity metric](https://en.wikipedia.org/wiki/Cosine_similarity) to determine how similar the two images are. For this post, we'll use “embeddings” to represent images in vector space. This gives us a nice way to meaningfully compress the high-dimensional pixel space of images (224 x 224 x 3, for example) to something much lower dimensional (768, for example). The primary advantage of doing this is the reduced computation time in the subsequent steps. <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/image_similarity/embeddings.png" width=700/> </div> ## Computing embeddings To compute the embeddings from the images, we'll use a vision model that has some understanding of how to represent the input images in the vector space. This type of model is also commonly referred to as image encoder. For loading the model, we leverage the [`AutoModel` class](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModel). It provides an interface for us to load any compatible model checkpoint from the Hugging Face Hub. Alongside the model, we also load the processor associated with the model for data preprocessing. 
```py from transformers import AutoImageProcessor, AutoModel model_ckpt = "nateraw/vit-base-beans" processor = AutoImageProcessor.from_pretrained(model_ckpt) model = AutoModel.from_pretrained(model_ckpt) ``` In this case, the checkpoint was obtained by fine-tuning a [Vision Transformer based model](https://huggingface.co/google/vit-base-patch16-224-in21k) on the [`beans` dataset](https://huggingface.co/datasets/beans). Some questions that might arise here: **Q1**: Why did we not use `AutoModelForImageClassification`? This is because we want to obtain dense representations of the images and not discrete categories, which are what `AutoModelForImageClassification` would have provided. **Q2**: Why this checkpoint in particular? As mentioned earlier, we're using a specific dataset to build the system. So, instead of using a generalist model (like the [ones trained on the ImageNet-1k dataset](https://huggingface.co/models?dataset=dataset:imagenet-1k&sort=downloads), for example), it's better to use a model that has been fine-tuned on the dataset being used. That way, the underlying model better understands the input images. **Note** that you can also use a checkpoint that was obtained through self-supervised pre-training. The checkpoint doesn't necessarily have to come from supervised learning. In fact, if pre-trained well, self-supervised models can [yield](https://ai.facebook.com/blog/dino-paws-computer-vision-with-self-supervised-transformers-and-10x-more-efficient-training/) impressive retrieval performance. Now that we have a model for computing the embeddings, we need some candidate images to query against. ## Loading a dataset for candidate images In some time, we'll be building hash tables mapping the candidate images to hashes. During the query time, we'll use these hash tables. We'll talk more about hash tables in the respective section but for now, to have a set of candidate images, we will use the `train` split of the [`beans` dataset](https://huggingface.co/datasets/beans). ```py from datasets import load_dataset dataset = load_dataset("beans") ``` This is how a single sample from the training split looks like: <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/image_similarity/beans.png" width=600/> </div> The dataset has three features: ```py dataset["train"].features >>> {'image_file_path': Value(dtype='string', id=None), 'image': Image(decode=True, id=None), 'labels': ClassLabel(names=['angular_leaf_spot', 'bean_rust', 'healthy'], id=None)} ``` To demonstrate the image similarity system, we'll use 100 samples from the candidate image dataset to keep the overall runtime short. ```py num_samples = 100 seed = 42 candidate_subset = dataset["train"].shuffle(seed=seed).select(range(num_samples)) ``` ## The process of finding similar images Below, you can find a pictorial overview of the process underlying fetching similar images. <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/image_similarity/fetch-similar-process.png"> </div> Breaking down the above figure a bit, we have: 1. Extract the embeddings from the candidate images (`candidate_subset`), storing them in a matrix. 2. Take a query image and extract its embeddings. 3. Iterate over the embedding matrix (computed in step 1) and compute the similarity score between the query embedding and the current candidate embeddings. 
We usually maintain a dictionary-like mapping that maintains a correspondence between some identifier of the candidate image and the similarity scores. 4. Sort the mapping structure w.r.t. the similarity scores and return the underlying identifiers. We use these identifiers to fetch the candidate samples. We can write a simple utility and `map()` it to our dataset of candidate images to compute the embeddings efficiently. ```py import torch def extract_embeddings(model: torch.nn.Module): """Utility to compute embeddings.""" device = model.device def pp(batch): images = batch["image"] # `transformation_chain` is a composition of preprocessing # transformations we apply to the input images to prepare them # for the model. For more details, check out the accompanying Colab Notebook. image_batch_transformed = torch.stack( [transformation_chain(image) for image in images] ) new_batch = {"pixel_values": image_batch_transformed.to(device)} with torch.no_grad(): embeddings = model(**new_batch).last_hidden_state[:, 0].cpu() return {"embeddings": embeddings} return pp ``` And we can map `extract_embeddings()` like so: ```py device = "cuda" if torch.cuda.is_available() else "cpu" extract_fn = extract_embeddings(model.to(device)) candidate_subset_emb = candidate_subset.map(extract_fn, batched=True, batch_size=batch_size) ``` Next, for convenience, we create a list containing the identifiers of the candidate images. ```py from tqdm.auto import tqdm candidate_ids = [] for id in tqdm(range(len(candidate_subset_emb))): label = candidate_subset_emb[id]["labels"] # Create a unique identifier. entry = str(id) + "_" + str(label) candidate_ids.append(entry) ``` We'll use the matrix of the embeddings of all the candidate images for computing the similarity scores with a query image. We have already computed the candidate image embeddings. In the next cell, we just gather them together in a matrix. ```py import numpy as np all_candidate_embeddings = np.array(candidate_subset_emb["embeddings"]) all_candidate_embeddings = torch.from_numpy(all_candidate_embeddings) ``` We'll use [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) to compute the similarity score between two embedding vectors. We'll then use it to fetch similar candidate samples given a query sample. ```py def compute_scores(emb_one, emb_two): """Computes cosine similarity between two vectors.""" scores = torch.nn.functional.cosine_similarity(emb_one, emb_two) return scores.numpy().tolist() def fetch_similar(image, top_k=5): """Fetches the `top_k` similar images with `image` as the query.""" # Prepare the input query image for embedding computation. image_transformed = transformation_chain(image).unsqueeze(0) new_batch = {"pixel_values": image_transformed.to(device)} # Compute the embedding. with torch.no_grad(): query_embeddings = model(**new_batch).last_hidden_state[:, 0].cpu() # Compute similarity scores with all the candidate images at one go. # We also create a mapping between the candidate image identifiers # and their similarity scores with the query image. sim_scores = compute_scores(all_candidate_embeddings, query_embeddings) similarity_mapping = dict(zip(candidate_ids, sim_scores)) # Sort the mapping dictionary and return `top_k` candidates.
similarity_mapping_sorted = dict( sorted(similarity_mapping.items(), key=lambda x: x[1], reverse=True) ) id_entries = list(similarity_mapping_sorted.keys())[:top_k] ids = list(map(lambda x: int(x.split("_")[0]), id_entries)) labels = list(map(lambda x: int(x.split("_")[-1]), id_entries)) return ids, labels ``` ## Perform a query Given all the utilities, we're equipped to do a similarity search. Let's have a query image from the `test` split of the `beans` dataset: ```py test_idx = np.random.choice(len(dataset["test"])) test_sample = dataset["test"][test_idx]["image"] test_label = dataset["test"][test_idx]["labels"] sim_ids, sim_labels = fetch_similar(test_sample) print(f"Query label: {test_label}") print(f"Top 5 candidate labels: {sim_labels}") ``` Leads to: ``` Query label: 0 Top 5 candidate labels: [0, 0, 0, 0, 0] ``` Seems like our system got the right set of similar images. When visualized, we'd get: <div align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/image_similarity/results_one.png"> </div> ## Further extensions and conclusions We now have a working image similarity system. But in reality, you'll be dealing with a lot more candidate images. Taking that into consideration, our current procedure has got multiple drawbacks: * If we store the embeddings as is, the memory requirements can shoot up quickly, especially when dealing with millions of candidate images. The embeddings are 768-d in our case, which can still be relatively high in the large-scale regime. * Having high-dimensional embeddings have a direct effect on the subsequent computations involved in the retrieval part. If we can somehow reduce the dimensionality of the embeddings without disturbing their meaning, we can still maintain a good trade-off between speed and retrieval quality. The [accompanying Colab Notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_similarity.ipynb) of this post implements and demonstrates utilities for achieving this with random projection and locality-sensitive hashing. 🤗 Datasets offers direct integrations with [FAISS](https://github.com/facebookresearch/faiss) which further simplifies the process of building similarity systems. Let's say you've already extracted the embeddings of the candidate images (the `beans` dataset) and stored them inside a feature called `embeddings`. You can now easily use the [`add_faiss_index()`](https://huggingface.co/docs/datasets/v2.7.1/en/package_reference/main_classes#datasets.Dataset.add_faiss_index) of the dataset to build a dense index: ```py dataset_with_embeddings.add_faiss_index(column="embeddings") ``` Once the index is built, `dataset_with_embeddings` can be used to retrieve the nearest examples given query embeddings with [`get_nearest_examples()`](https://huggingface.co/docs/datasets/v2.7.1/en/package_reference/main_classes#datasets.Dataset.get_nearest_examples): ```py scores, retrieved_examples = dataset_with_embeddings.get_nearest_examples( "embeddings", qi_embedding, k=top_k ) ``` The method returns scores and corresponding candidate examples. To know more, you can check out the [official documentation](https://huggingface.co/docs/datasets/faiss_es) and [this notebook](https://colab.research.google.com/gist/sayakpaul/5b5b5a9deabd3c5d8cb5ef8c7b4bb536/image_similarity_faiss.ipynb). 
Finally, you can try out the following Space that builds a mini image similarity application: <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.12.0/gradio.js"></script> <gradio-app theme_mode="light" space="sayakpaul/fetch-similar-images"></gradio-app> In this post, we ran through a quickstart for building image similarity systems. If you found this post interesting, we highly recommend building on top of the concepts we discussed here so you can get more comfortable with the inner workings. Still looking to learn more? Here are some additional resources that might be useful for you: * [Faiss: A library for efficient similarity search](https://engineering.fb.com/2017/03/29/data-infrastructure/faiss-a-library-for-efficient-similarity-search/) * [ScaNN: Efficient Vector Similarity Search](http://ai.googleblog.com/2020/07/announcing-scann-efficient-vector.html) * [Integrating Image Searchers within Mobile Applications](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_searcher)
hf_public_repos/blog/rwkv.md
--- title: "Introducing RWKV - An RNN with the advantages of a transformer" thumbnail: /blog/assets/142_rwkv/rwkv_thumbnail.png authors: - user: BLinkDL guest: true - user: Hazzzardous guest: true - user: sgugger - user: ybelkada --- # Introducing RWKV - An RNN with the advantages of a transformer ChatGPT and chatbot-powered applications have captured significant attention in the Natural Language Processing (NLP) domain. The community is constantly seeking strong, reliable and open-source models for their applications and use cases. The rise of these powerful models stems from the democratization and widespread adoption of transformer-based models, first introduced by Vaswani et al. in 2017. These models significantly outperformed previous SoTA NLP models based on Recurrent Neural Networks (RNNs), which were considered dead after that paper. Through this blogpost, we will introduce the integration of a new architecture, RWKV, that combines the advantages of both RNNs and transformers, and that has been recently integrated into the Hugging Face [transformers](https://github.com/huggingface/transformers) library. ### Overview of the RWKV project The RWKV project was kicked off and is being led by [Bo Peng](https://github.com/BlinkDL), who is actively contributing and maintaining the project. The community, organized in the official discord channel, is constantly enhancing the project’s artifacts on various topics such as performance (RWKV.cpp, quantization, etc.), scalability (dataset processing & scrapping) and research (chat-fine tuning, multi-modal finetuning, etc.). The GPUs for training RWKV models are donated by Stability AI. You can get involved by joining the [official discord channel](https://discord.gg/qt9egFA7ve) and learn more about the general ideas behind RWKV in these two blogposts: https://johanwind.github.io/2023/03/23/rwkv_overview.html / https://johanwind.github.io/2023/03/23/rwkv_details.html ### Transformer Architecture vs RNNs The RNN architecture is one of the first widely used Neural Network architectures for processing a sequence of data, contrary to classic architectures that take a fixed size input. It takes as input the current “token” (i.e. current data point of the datastream), the previous “state”, and computes the predicted next token, and the predicted next state. The new state is then used to compute the prediction of the next token, and so on. A RNN can be also used in different “modes”, therefore enabling the possibility of applying RNNs on different scenarios, as denoted by [Andrej Karpathy’s blogpost](https://karpathy.github.io/2015/05/21/rnn-effectiveness/), such as one-to-one (image-classification), one-to-many (image captioning), many-to-one (sequence classification), many-to-many (sequence generation), etc. | ![rnn_diagram](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/142_rwkv/RNN-scheme.png) | |:--:| | <b>Overview of possible configurations of using RNNs. Source: <a href="https://karpathy.github.io/2015/05/21/rnn-effectiveness/" rel="noopener" target="_blank" >Andrej Karpathy's blogpost</a> </b>| Because RNNs use the same weights to compute predictions at every step, they struggle to memorize information for long-range sequences due to the vanishing gradient issue. Efforts have been made to address this limitation by introducing new architectures such as LSTMs or GRUs. However, the transformer architecture proved to be the most effective thus far in resolving this issue. 
In the transformer architecture, the input tokens are processed simultaneously in the self-attention module. The tokens are first linearly projected into different spaces using the query, key and value weights. The resulting matrices are directly used to compute the attention scores (through softmax, as shown below), then multiplied by the value hidden states to obtain the final hidden states. This design enables the architecture to effectively mitigate the long-range sequence issue, and also perform faster inference and training compared to RNN models. | ![transformer_diagram](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/142_rwkv/transformer-scheme.png) | |:--:| | <b>Formulation of attention scores in transformer models. Source: <a href="https://jalammar.github.io/illustrated-transformer/" rel="noopener" target="_blank" >Jay Alammar's blogpost</a> </b>| | ![rwkv_attention_formula](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/142_rwkv/RWKV-formula.png)| |:--:| | <b>Formulation of attention scores in RWKV models. Source: <a href="https://raw.githubusercontent.com/BlinkDL/RWKV-LM/main/RWKV-formula.png" rel="noopener" target="_blank" >RWKV blogpost</a> </b>| During training, Transformer architecture has several advantages over traditional RNNs and CNNs. One of the most significant advantages is its ability to learn contextual representations. Unlike the RNNs and CNNs, which process input sequences one word at a time, Transformer architecture processes input sequences as a whole. This allows it to capture long-range dependencies between words in the sequence, which is particularly useful for tasks such as language translation and question answering. During inference, RNNs have some advantages in speed and memory efficiency. These advantages include simplicity, due to needing only matrix-vector operations, and memory efficiency, as the memory requirements do not grow during inference. Furthermore, the computation speed remains the same with context window length due to how computations only act on the current token and the state. ## The RWKV architecture RWKV is inspired by [Apple’s Attention Free Transformer](https://machinelearning.apple.com/research/attention-free-transformer). The architecture has been carefully simplified and optimized such that it can be transformed into an RNN. In addition, a number of tricks has been added such as `TokenShift` & `SmallInitEmb` (the list of tricks is listed in [the README of the official GitHub repository](https://github.com/BlinkDL/RWKV-LM/blob/main/README.md#how-it-works)) to boost its performance to match GPT. Without these, the model wouldn't be as performant. For training, there is an infrastructure to scale the training up to 14B parameters as of now, and some issues have been iteratively fixed in RWKV-4 (latest version as of today), such as numerical instability. ### RWKV as a combination of RNNs and transformers How to combine the best of transformers and RNNs? The main drawback of transformer-based models is that it can become challenging to run a model with a context window that is larger than a certain value, as the attention scores are computed simultaneously for the entire sequence. RNNs natively support very long context lengths - only limited by the context length seen in training, but this can be extended to millions of tokens with careful coding. 
Currently, there are RWKV models trained on a context length of 8192 (`ctx8192`) and they are as fast as `ctx1024` models and require the same amount of RAM. The major drawbacks of traditional RNN models and how RWKV is different: 1. Traditional RNN models are unable to utilize very long contexts (LSTM can only manage ~100 tokens when used as a LM). However, RWKV can utilize thousands of tokens and beyond, as shown below: | ![rwkv_loss](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/142_rwkv/RWKV-loss.png) | |:--:| | <b>LM loss with respect to different context lengths and model sizes. Source: <a href="https://raw.githubusercontent.com/BlinkDL/RWKV-LM/main/RWKV-ctxlen.png" rel="noopener" target="_blank" >RWKV original repository</a> </b>| 2. Traditional RNN models cannot be parallelized when training. RWKV is similar to a “linearized GPT” and it trains faster than GPT. By combining both advantages into a single architecture, the hope is that RWKV can grow to become more than the sum of its parts. ### RWKV attention formulation The model architecture is very similar to classic transformer-based models (i.e. an embedding layer, multiple identical layers, layer normalization, and a Causal Language Modeling head to predict the next token). The only difference is on the attention layer, which is completely different from the traditional transformer-based models. To gain a more comprehensive understanding of the attention layer, we recommend to delve into the detailed explanation provided in [a blog post by Johan Sokrates Wind](https://johanwind.github.io/2023/03/23/rwkv_details.html). ### Existing checkpoints #### Pure language models: RWKV-4 models Most adopted RWKV models range from ~170M parameters to 14B parameters. According to the RWKV overview [blog post](https://johanwind.github.io/2023/03/23/rwkv_overview.html), these models have been trained on the Pile dataset and evaluated against other SoTA models on different benchmarks, and they seem to perform quite well, with very comparable results against them. | ![rwkv_loss](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/142_rwkv/RWKV-eval.png) | |:--:| | <b>RWKV-4 compared to other common architectures. Source: <a href="https://johanwind.github.io/2023/03/23/rwkv_overview.html" rel="noopener" target="_blank" >Johan Wind's blogpost</a> </b>| #### Instruction Fine-tuned/Chat Version: RWKV-4 Raven Bo has also trained a “chat” version of the RWKV architecture, the RWKV-4 Raven model. It is a RWKV-4 pile (RWKV model pretrained on The Pile dataset) model fine-tuned on ALPACA, CodeAlpaca, Guanaco, GPT4All, ShareGPT and more. The model is available in multiple versions, with models trained on different languages (English only, English + Chinese + Japanese, English + Japanese, etc.) and different sizes (1.5B parameters, 7B parameters, 14B parameters). All the HF converted models are available on Hugging Face Hub, in the [`RWKV` organization](https://huggingface.co/RWKV). ## 🤗 Transformers integration The architecture has been added to the `transformers` library thanks to [this Pull Request](https://github.com/huggingface/transformers/pull/22797). As of the time of writing, you can use it by installing `transformers` from source, or by using the `main` branch of the library. The architecture is tightly integrated with the library, and you can use it as you would any other architecture. Let us walk through some examples below. 
### Text Generation Example To generate text given an input prompt you can use `pipeline` to generate text: ```python from transformers import pipeline model_id = "RWKV/rwkv-4-169m-pile" prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese." pipe = pipeline("text-generation", model=model_id) print(pipe(prompt, max_new_tokens=20)) >>> [{'generated_text': '\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese.\n\nThe researchers found that the dragons were able to communicate with each other, and that they were'}] ``` Or you can run and start from the snippet below: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("RWKV/rwkv-4-169m-pile") tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-4-169m-pile") prompt = "\nIn a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese." inputs = tokenizer(prompt, return_tensors="pt") output = model.generate(inputs["input_ids"], max_new_tokens=20) print(tokenizer.decode(output[0].tolist())) >>> In a shocking finding, scientist discovered a herd of dragons living in a remote, previously unexplored valley, in Tibet. Even more surprising to the researchers was the fact that the dragons spoke perfect Chinese.\n\nThe researchers found that the dragons were able to communicate with each other, and that they were ``` ### Use the raven models (chat models) You can prompt the chat model in the alpaca style, here is an example below: ```python from transformers import AutoTokenizer, AutoModelForCausalLM model_id = "RWKV/rwkv-raven-1b5" model = AutoModelForCausalLM.from_pretrained(model_id).to(0) tokenizer = AutoTokenizer.from_pretrained(model_id) question = "Tell me about ravens" prompt = f"### Instruction: {question}\n### Response:" inputs = tokenizer(prompt, return_tensors="pt").to(0) output = model.generate(inputs["input_ids"], max_new_tokens=100) print(tokenizer.decode(output[0].tolist(), skip_special_tokens=True)) >>> ### Instruction: Tell me about ravens ### Response: RAVENS are a type of bird that is native to the Middle East and North Africa. They are known for their intelligence, adaptability, and their ability to live in a variety of environments. RAVENS are known for their intelligence, adaptability, and their ability to live in a variety of environments. They are known for their intelligence, adaptability, and their ability to live in a variety of environments. ``` According to Bo, better instruction techniques are detailed in [this discord message (make sure to join the channel before clicking)](https://discord.com/channels/992359628979568762/1083107245971226685/1098533896355848283) | ![discord_message](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/142_rwkv/RWKV%20instructions.png) | ### Weights conversion Any user could easily convert the original RWKV weights to the HF format by simply running the conversion script provided in the `transformers` library. 
First, push the "raw" weights to the Hugging Face Hub (let's denote that repo as `RAW_HUB_REPO`, and the raw file `RAW_FILE`), then run the conversion script: ```bash python convert_rwkv_checkpoint_to_hf.py --repo_id RAW_HUB_REPO --checkpoint_file RAW_FILE --output_dir OUTPUT_DIR ``` If you want to push the converted model on the Hub (let's say, under `dummy_user/converted-rwkv`), first forget to log in with `huggingface-cli login` before pushing the model, then run: ```bash python convert_rwkv_checkpoint_to_hf.py --repo_id RAW_HUB_REPO --checkpoint_file RAW_FILE --output_dir OUTPUT_DIR --push_to_hub --model_name dummy_user/converted-rwkv ``` ## Future work ### Multi-lingual RWKV Bo is currently working on a multilingual corpus to train RWKV models. Recently a new multilingual tokenizer [has been released](https://twitter.com/BlinkDL_AI/status/1649839897208045573). ### Community-oriented and research projects The RWKV community is very active and working on several follow up directions, a list of cool projects can be find in a [dedicated channel on discord (make sure to join the channel before clicking the link)](https://discord.com/channels/992359628979568762/1068563033510653992). There is also a channel dedicated to research around this architecure, feel free to join and contribute! ### Model Compression and Acceleration Due to only needing matrix-vector operations, RWKV is an ideal candidate for non-standard and experimental computing hardware, such as photonic processors/accelerators. Therefore, the architecture can also naturally benefit from classic acceleration and compression techniques (such as [ONNX](https://github.com/harrisonvanderbyl/rwkv-onnx), 4-bit/8-bit quantization, etc.), and we hope this will be democratized for developers and practitioners together with the transformers integration of the architecture. RWKV can also benefit from the acceleration techniques proposed by [`optimum`](https://github.com/huggingface/optimum) library in the near future. Some of these techniques are highlighted in the [`rwkv.cpp` repository](https://github.com/saharNooby/rwkv.cpp) or [`rwkv-cpp-cuda` repository](https://github.com/harrisonvanderbyl/rwkv-cpp-cuda). ## Acknowledgements The Hugging Face team would like to thank Bo and RWKV community for their time and for answering our questions about the architecture. We would also like to thank them for their help and support and we look forward to see more adoption of RWKV models in the HF ecosystem. We also would like to acknowledge the work of [Johan Wind](https://twitter.com/johanwind) for his blogpost on RWKV, which helped us a lot to understand the architecture and its potential. And finally, we would like to highlight anf acknowledge the work of [ArEnSc](https://github.com/ArEnSc) for starting over the initial `transformers` PR. Also big kudos to [Merve Noyan](https://huggingface.co/merve), [Maria Khalusova](https://huggingface.co/MariaK) and [Pedro Cuenca](https://huggingface.co/pcuenq) for kindly reviewing this blogpost to make it much better! ## Citation If you use RWKV for your work, please use [the following `cff` citation](https://github.com/BlinkDL/RWKV-LM/blob/main/CITATION.cff).
hf_public_repos/autotrain-advanced/docs/source/_toctree.yml
- sections: - local: index title: 🤗 AutoTrain - local: cost title: How much does it cost? - local: support title: Get help and support - local: faq title: Frequently Asked Questions title: Getting Started - sections: - local: quickstart_spaces title: Train on Spaces - local: quickstart_py title: Python SDK - local: quickstart title: Train Locally - local: config title: Config File title: Quickstart - sections: - local: tasks/llm_finetuning title: LLM Finetuning - local: tasks/text_classification_regression title: Text Classification/Regression - local: tasks/extractive_qa title: Extractive QA - local: tasks/sentence_transformer title: Sentence Transformer - local: tasks/image_classification_regression title: Image Classification / Regression - local: tasks/object_detection title: Object Detection - local: tasks/seq2seq title: Seq2Seq - local: tasks/token_classification title: Token Classification - local: tasks/tabular title: Tabular title: Tasks - sections: - local: col_map title: Understanding Column Mapping - local: autotrain_api title: AutoTrain API title: Miscellaneous
hf_public_repos/autotrain-advanced/docs/source/starting_ui.bck
# Starting the UI The AutoTrain UI can be started in multiple ways depending on your needs. We offer UI on Hugging Face Spaces, Colab and locally! ## Hugging Face Spaces To start the UI on Hugging Face Spaces, you can simply click on the following link: [![Deploy on Spaces](https://huggingface.co/datasets/huggingface/badges/resolve/main/deploy-on-spaces-md.svg)](https://huggingface.co/login?next=/spaces/autotrain-projects/autotrain-advanced?duplicate=true) Please make sure you keep the space private and attach appropriate hardware to the space. You can also read more about AutoTrain on the homepage and follow the link there to start your own training instance on Hugging Face Spaces. [Click here](https://huggingface.co/autotrain) to visit the homepage. ## Colab To start the UI on Colab, you can simply click on the following link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/colabs/AutoTrain.ipynb) Please note, to run the app on Colab, you will need an ngrok token. You can get one by signing up for free on [ngrok](https://ngrok.com/). This is because Colab does not allow exposing ports to the internet directly. ## Locally To run the autotrain app locally, install autotrain-advanced python package: ```bash $ pip install autotrain-advanced ``` and then run the following command: ```bash $ export HF_TOKEN=your_hugging_face_write_token $ autotrain app --host 127.0.0.1 --port 8000 ``` This will start the app on `http://127.0.0.1:8000`. AutoTrain doesn't install pytorch, torchaudio, torchvision, or any other dependencies. You will need to install them separately. It is thus recommended to use conda environment: ```bash $ conda create -n autotrain python=3.10 $ conda activate autotrain $ pip install autotrain-advanced $ conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia $ conda install -c "nvidia/label/cuda-12.1.0" cuda-nvcc $ conda install xformers -c xformers $ python -m nltk.downloader punkt $ pip install flash-attn --no-build-isolation $ pip install deepspeed $ export HF_TOKEN=your_hugging_face_write_token $ autotrain app --host 127.0.0.1 --port 8000 ``` In case of any issues, please report on the [GitHub issues](https://github.com/huggingface/autotrain-advanced/).
hf_public_repos/autotrain-advanced/docs/source/support.mdx
# Help and Support If you need assistance with AutoTrain Advanced or have questions about your projects, you can reach out through several dedicated support channels. We're here to help you navigate any issues you encounter, from technical queries to billing concerns. Below are the best ways to get support: - For technical support or to report a bug, you can [create an issue](https://github.com/huggingface/autotrain-advanced/issues/new) directly in the AutoTrain Advanced GitHub repository. GitHub repo is ideal for tracking bugs, requesting features, or getting help with troubleshooting problems. When submitting an issue, please include all the details in question to help us provide the most relevant support quickly. - [Ask in the Hugging Face Forum](https://discuss.huggingface.co/c/autotrain/16). This space is perfect for asking questions, sharing your experiences, or discussing AutoTrain with other users and the Hugging Face team. The forum is a great resource for getting advice, learning best practices, and connecting with other machine learning practitioners. - For enterprise users or specific inquiries related to billing, please [email us](mailto:[email protected]) directly. This channel ensures that your more sensitive or account-specific issues are handled appropriately and confidentially. When emailing, please provide your username and project name so we can assist you efficiently. Please note: e-mail support is only available for pro/enterprise users or those with specific queries about billing. By utilizing these support channels, you can ensure that any hurdles you face while using AutoTrain Advanced are addressed promptly, allowing you to focus on achieving your project goals. Whether you're a beginner or an experienced user, we are here to support your journey in AI model training.
hf_public_repos/autotrain-advanced/docs/source/cost.mdx
# How much does it cost? AutoTrain offers an accessible approach to model training, providing deployable models with just a few clicks. Understanding the cost involved is essential to planning and executing your projects efficiently. ## Local Usage When you choose to use AutoTrain locally on your own hardware, there is no cost. This option is ideal for those who prefer to manage their own infrastructure and do not require the scalability that cloud resources offer. ## Using AutoTrain on Hugging Face Spaces **Pay-As-You-Go**: Costs for using AutoTrain in Hugging Face Spaces are based on the computing resources you consume. This flexible pricing structure ensures you only pay for what you use, making it cost-effective and scalable for projects of any size. **Ownership and Portability**: Unlike some other platforms, AutoTrain does not retain ownership of your models. Once training is complete, you are free to download and deploy your models wherever you choose, providing flexibility and control over your all your assets. ### Pricing Details **Resource-Based Billing**: Charges are accrued per minute according to the type of hardware utilized during training. This means you can scale your resource usage based on the complexity and needs of your projects. For a detailed breakdown of the costs associated with using Hugging Face Spaces, please refer to the [pricing](https://huggingface.co/pricing#spaces) section on our website. To access the paid features of AutoTrain, you must have a valid payment method on file. You can manage your payment options and view your billing information in the [billing section of your Hugging Face account settings.](https://huggingface.co/settings/billing) By offering both free and flexible paid options, AutoTrain ensures that users can choose the most suitable model training solution for their needs, whether they are experimenting on a local machine or scaling up operations on Hugging Face Spaces.
hf_public_repos/autotrain-advanced/docs/source/col_map.mdx
# Understanding Column Mapping Column mapping is a critical setup process in AutoTrain that informs the system about the roles of different columns in your dataset. Whether it's a tabular dataset, text classification data, or another type, precise column mapping ensures that AutoTrain processes each dataset element correctly. ## How Column Mapping Works AutoTrain has no way of knowing on its own what the columns in your dataset represent. It requires a clear understanding of each column's function within your dataset to train models effectively. This is managed through a straightforward mapping system in the user interface, represented as a dictionary. Here's a typical example: ``` {"text": "text", "label": "target"} ``` In this example, the `text` column in your dataset corresponds to the text data AutoTrain uses for processing, and the `target` column is treated as the label for training. In other words, column mapping is exactly how you tell AutoTrain what each column represents. If your data is already in AutoTrain format, you don't need to change the column mapping. If not, you can easily map the columns in your dataset to the correct AutoTrain format. In the UI, you will see column mapping as a dictionary: ``` {"text": "text", "label": "target"} ``` Here, the column `text` in your dataset is mapped to the AutoTrain column `text`, and the column `target` in your dataset is mapped to the AutoTrain column `label`. Let's say you are training a text classification model and your dataset has the following columns: ``` full_text, target_sentiment "this movie is great", positive "this movie is bad", negative ``` You can map these columns to the AutoTrain format as follows: ``` {"text": "full_text", "label": "target_sentiment"} ``` If your dataset has the columns `text` and `label`, you don't need to change the column mapping. Let's take a look at the column mappings for each task: ## LLM Note: For all LLM tasks, if the text column(s) is not already formatted, i.e. if it contains samples in chat format (dict or JSON), then you should use the `chat_template` parameter. Read more about it in the LLM Parameters section. ### SFT / Generic Trainer ``` {"text": "text"} ``` `text`: The column in your dataset that contains the text data. ### Reward Trainer ``` {"text": "text", "rejected_text": "rejected_text"} ``` `text`: The column in your dataset that contains the text data. `rejected_text`: The column in your dataset that contains the rejected text data. ### DPO / ORPO Trainer ``` {"prompt": "prompt", "text": "text", "rejected_text": "rejected_text"} ``` `prompt`: The column in your dataset that contains the prompt data. `text`: The column in your dataset that contains the text data. `rejected_text`: The column in your dataset that contains the rejected text data. ## Text Classification & Regression, Seq2Seq For text classification and regression, the column mapping should be as follows: ``` {"text": "dataset_text_column", "label": "dataset_target_column"} ``` `text`: The column in your dataset that contains the text data. `label`: The column in your dataset that contains the target variable. ## Token Classification ``` {"text": "tokens", "label": "tags"} ``` `text`: The column in your dataset that contains the tokens. These tokens must be a list of strings. `label`: The column in your dataset that contains the tags. These tags must be a list of strings. For token classification, if you are using a CSV, make sure that the columns are stringified lists, as in the example below.
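Here is an illustrative snippet (not a file shipped with AutoTrain; the tokens and tag names are just examples) of what such a CSV could look like:

```
tokens,tags
"['I', 'love', 'Paris']","['O', 'O', 'B-LOC']"
"['AutoTrain', 'is', 'great']","['B-ORG', 'O', 'O']"
```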
## Tabular Classification & Regression ``` {"id": "id", "label": ["target"]} ``` `id`: The column in your dataset that contains the unique identifier for each row. `label`: The column in your dataset that contains the target variable. This should be a list of strings. For a single target column, you can pass a list with a single element. For multiple target columns, e.g. a multi label classification task, you can pass a list with multiple elements. # Image Classification For image classification, the column mapping should be as follows: ``` {"image": "image_column", "label": "label_column"} ``` Image classification requires column mapping only when you are using a dataset from Hugging Face Hub. For uploaded datasets, leave column mapping as it is. # Sentence Transformers For all sentence transformers tasks, one needs to map columns to `sentence1_column`, `sentence2_column`, `sentence3_column` & `target_column` column. Not all columns need to be mapped for all trainers of sentence transformers. ## `pair`: ``` {"sentence1_column": "anchor", "sentence2_column": "positive"} ``` ## `pair_class`: ``` {"sentence1_column": "premise", "sentence2_column": "hypothesis", "target_column": "label"} ``` ## `pair_score`: ``` {"sentence1_column": "sentence1", "sentence2_column": "sentence2", "target_column": "score"} ``` ## `triplet`: ``` {"sentence1_column": "anchor", "sentence2_column": "positive", "sentence3_column": "negative"} ``` ## `qa`: ``` {"sentence1_column": "query", "sentence2_column": "answer"} ``` # Extractive Question Answering For extractive question answering, the column mapping should be as follows: ``` {"text": "context", "question": "question", "answer": "answers"} ``` where `answer` is a dictionary with keys `text` and `answer_start`. ## Ensuring Accurate Mapping To ensure your model trains correctly: - Verify Column Names: Double-check that the names used in the mapping dictionary accurately reflect those in your dataset. - Format Appropriately: Especially in token classification, ensure your data format matches expectations (e.g., lists of strings). - Update Mappings for New Datasets: Each new dataset might require its unique mappings based on its structure and the task at hand. By following these guidelines and using the provided examples as templates, you can effectively instruct AutoTrain on how to interpret and handle your data for various machine learning tasks. This process is fundamental for achieving optimal results from your model training endeavors.
hf_public_repos/autotrain-advanced/docs/source/quickstart.mdx
# Quickstart Guide for Local Training This quickstart is for local installation and usage. If you want to use AutoTrain on Hugging Face Spaces, please refer to the *AutoTrain on Hugging Face Spaces* section. You can install AutoTrain Advanced using pip: ```bash $ pip install autotrain-advanced ``` It is advised to install autotrain-advanced in a virtual environment to avoid any conflicts with other packages. Note: AutoTrain doesn't install pytorch, torchaudio, torchvision, or any other large dependencies. You will need to install them separately. ```bash $ conda create -n autotrain python=3.10 $ conda activate autotrain $ pip install autotrain-advanced $ conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia $ conda install -c "nvidia/label/cuda-12.1.0" cuda-nvcc $ conda install xformers -c xformers $ python -m nltk.downloader punkt $ pip install flash-attn --no-build-isolation # if you want to use flash-attn $ pip install deepspeed # if you want to use deepspeed ```` # Running AutoTrain User Interface (UI) To run the autotrain app locally, you can use the following command: ```bash $ export HF_TOKEN=your_hugging_face_write_token $ autotrain app --host 127.0.0.1 --port 8000 ``` This will start the app on `http://127.0.0.1:8000`. # Using AutoTrain Command Line Interface (CLI) It is also possible to use the CLI: ```bash $ export HF_TOKEN=your_hugging_face_write_token $ autotrain --help ``` This will show the CLI commands that can be used: ```bash usage: autotrain <command> [<args>] positional arguments: { app, llm, setup, api, text-classification, text-regression, image-classification, tabular, spacerunner, seq2seq, token-classification } commands options: -h, --help show this help message and exit --version, -v Display AutoTrain version --config CONFIG Optional configuration file For more information about a command, run: `autotrain <command> --help` ``` It is advised to use only the `autotrain --config CONFIG_FILE` command for training when using the CLI. The autotrain commands that end users will be interested in are: - `app`: Start the AutoTrain UI - `llm`: Train a language model - `text-classification`: Train a text classification model - `text-regression`: Train a text regression model - `image-classification`: Train an image classification model - `tabular`: Train a tabular model - `spacerunner`: Train any custom model using SpaceRunner - `seq2seq`: Train a sequence-to-sequence model - `token-classification`: Train a token classification model Note: above commands are not required if you use preferred `autotrain --config CONFIG_FILE` command to train the models.
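For reference, a config file for LLM finetuning might look roughly like the sketch below. Treat this as an assumption-heavy illustration: the exact keys and accepted values are described in the Config File section of these docs, and the parameter names here simply mirror the Python `LLMTrainingParams` example shown elsewhere in these docs, so verify them against the documented schema before use.

```yaml
# Illustrative sketch of a config file; check the Config File docs for the exact schema.
task: llm-sft
base_model: meta-llama/Llama-3.2-1B-Instruct
project_name: autotrain-llama32-1b-finetune
log: tensorboard
backend: local

data:
  path: HuggingFaceH4/no_robots
  train_split: train
  chat_template: tokenizer
  column_mapping:
    text_column: messages

params:
  epochs: 3
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: int4
  mixed_precision: bf16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
```

With such a file saved as `config.yml`, training would then be launched with `autotrain --config config.yml`.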
hf_public_repos/autotrain-advanced/docs/source/index.mdx
# AutoTrain ![autotrain-homepage](https://raw.githubusercontent.com/huggingface/autotrain-advanced/main/static/autotrain_homepage.png) 🤗 AutoTrain Advanced (or simply AutoTrain), developed by Hugging Face, is a robust no-code platform designed to simplify the process of training state-of-the-art models across multiple domains: Natural Language Processing (NLP), Computer Vision (CV), and even Tabular Data analysis. This tool leverages the powerful frameworks created by various teams at Hugging Face, making advanced machine learning and artificial intelligence accessible to a broader audience without requiring deep technical expertise. ## Who should use AutoTrain? AutoTrain is the perfect tool for anyone eager to dive into the world of machine learning without getting bogged down by the complexities of model training. Whether you're a business professional, researcher, educator, or hobbyist, AutoTrain offers the simplicity of a no-code interface while still providing the capabilities necessary to develop sophisticated models tailored to your unique datasets. AutoTrain is for anyone who wants to train a state-of-the-art model for a NLP, CV, Speech or even Tabular task, but doesn't want to spend time on the technical details of training a model. Our mission is to democratize machine learning technology, ensuring it is not only accessible to data scientists and ML engineers but also to those without a technical background. If you're looking to harness the power of AI for your projects, AutoTrain is your answer. ## How to use AutoTrain? We offer several ways to use AutoTrain: - No code users can use `AutoTrain Advanced` by creating a new space with AutoTrain Docker image: [Click here](https://huggingface.co/login?next=/spaces/autotrain-projects/autotrain-advanced?duplicate=true) to create AutoTrain Space. Remember to keep your space private and ensure it is equipped with the necessary hardware resources (GPU) for optimal performance. - If you prefer a more hands-on approach, AutoTrain Advanced can also be run locally through its intuitive UI or accessed via the Python API provided in the autotrain-advanced package. This flexibility allows developers to integrate AutoTrain capabilities directly into their projects, customize workflows, and enhance their toolsets with advanced machine learning functionalities. By bridging the gap between cutting-edge technology and practical usability, AutoTrain Advanced empowers users to achieve remarkable results in AI without the need for extensive programming knowledge. Start your journey with AutoTrain today and unlock the potential of machine learning for your projects! 
## Walkthroughs To get started with AutoTrain, check out our walkthroughs and tutorials: - [Extractive Question Answering with AutoTrain](https://huggingface.co/blog/abhishek/extractive-qa-autotrain) - [Finetuning PaliGemma with AutoTrain](https://huggingface.co/blog/abhishek/paligemma-finetuning-autotrain) - [Training an Object Detection Model with AutoTrain](https://huggingface.co/blog/abhishek/object-detection-autotrain) - [How to Fine-Tune Custom Embedding Models Using AutoTrain](https://huggingface.co/blog/abhishek/finetune-custom-embeddings-autotrain) - [Train Custom Models on Hugging Face Spaces with AutoTrain SpaceRunner](https://huggingface.co/blog/abhishek/autotrain-spacerunner) - [How to Finetune phi-3 on MacBook Pro](https://huggingface.co/blog/abhishek/phi3-finetune-macbook) - [Finetune Mixtral 8x7B with AutoTrain](https://huggingface.co/blog/abhishek/autotrain-mixtral-dgx-cloud-local) - [Easily Train Models with H100 GPUs on NVIDIA DGX Cloud](https://huggingface.co/blog/train-dgx-cloud)
hf_public_repos/autotrain-advanced/docs/source/quickstart_py.mdx
# Quickstart with Python AutoTrain is a library that allows you to train state of the art models on Hugging Face Spaces, or locally. It provides a simple and easy-to-use interface to train models for various tasks like llm finetuning, text classification, image classification, object detection, and more. In this quickstart guide, we will show you how to train a model using AutoTrain in Python. ## Getting Started AutoTrain can be installed using pip: ```bash $ pip install autotrain-advanced ``` The example code below shows how to finetune an LLM model using AutoTrain in Python: ```python import os from autotrain.params import LLMTrainingParams from autotrain.project import AutoTrainProject params = LLMTrainingParams( model="meta-llama/Llama-3.2-1B-Instruct", data_path="HuggingFaceH4/no_robots", chat_template="tokenizer", text_column="messages", train_split="train", trainer="sft", epochs=3, batch_size=1, lr=1e-5, peft=True, quantization="int4", target_modules="all-linear", padding="right", optimizer="paged_adamw_8bit", scheduler="cosine", gradient_accumulation=8, mixed_precision="bf16", merge_adapter=True, project_name="autotrain-llama32-1b-finetune", log="tensorboard", push_to_hub=True, username=os.environ.get("HF_USERNAME"), token=os.environ.get("HF_TOKEN"), ) backend = "local" project = AutoTrainProject(params=params, backend=backend, process=True) project.create() ``` In this example, we are finetuning the `meta-llama/Llama-3.2-1B-Instruct` model on the `HuggingFaceH4/no_robots` dataset. We are training the model for 3 epochs with a batch size of 1 and a learning rate of `1e-5`. We are using the `paged_adamw_8bit` optimizer and the `cosine` scheduler. We are also using mixed precision training with a gradient accumulation of 8. The final model will be pushed to the Hugging Face Hub after training. To train the model, run the following command: ```bash $ export HF_USERNAME=<your-hf-username> $ export HF_TOKEN=<your-hf-write-token> $ python train.py ``` This will create a new project directory with the name `autotrain-llama32-1b-finetune` and start the training process. Once the training is complete, the model will be pushed to the Hugging Face Hub. Your HF_TOKEN and HF_USERNAME are only required if you want to push the model or if you are accessing a gated model or dataset. ## AutoTrainProject Class [[autodoc]] project.AutoTrainProject ## Parameters ### Text Tasks [[autodoc]] trainers.clm.params.LLMTrainingParams [[autodoc]] trainers.sent_transformers.params.SentenceTransformersParams [[autodoc]] trainers.seq2seq.params.Seq2SeqParams [[autodoc]] trainers.token_classification.params.TokenClassificationParams [[autodoc]] trainers.extractive_question_answering.params.ExtractiveQuestionAnsweringParams [[autodoc]] trainers.text_classification.params.TextClassificationParams [[autodoc]] trainers.text_regression.params.TextRegressionParams ### Image Tasks [[autodoc]] trainers.image_classification.params.ImageClassificationParams [[autodoc]] trainers.image_regression.params.ImageRegressionParams [[autodoc]] trainers.object_detection.params.ObjectDetectionParams ### Tabular Tasks [[autodoc]] trainers.tabular.params.TabularParams
7
0
hf_public_repos/autotrain-advanced/docs/source
hf_public_repos/autotrain-advanced/docs/source/tasks/object_detection.mdx
# Object Detection Object detection is a form of supervised learning where a model is trained to identify and categorize objects within images. AutoTrain simplifies the process, enabling you to train a state-of-the-art object detection model by simply uploading labeled example images. ## Preparing your data To ensure your object detection model trains effectively, follow these guidelines for preparing your data: ### Organizing Images Prepare a zip file containing your images and metadata.jsonl. ``` Archive.zip ├── 0001.png ├── 0002.png ├── 0003.png ├── . ├── . ├── . └── metadata.jsonl ``` Example for `metadata.jsonl`: ``` {"file_name": "0001.png", "objects": {"bbox": [[302.0, 109.0, 73.0, 52.0]], "category": [0]}} {"file_name": "0002.png", "objects": {"bbox": [[810.0, 100.0, 57.0, 28.0]], "category": [1]}} {"file_name": "0003.png", "objects": {"bbox": [[160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], "category": [2, 2]}} ``` Please note that bboxes need to be in COCO format `[x, y, width, height]`. ### Image Requirements - Format: Ensure all images are in JPEG, JPG, or PNG format. - Quantity: Include at least 5 images to provide the model with sufficient examples for learning. - Exclusivity: The zip file should exclusively contain images and metadata.jsonl. No additional files or nested folders should be included. Some points to keep in mind: - The images must be jpeg, jpg or png. - There should be at least 5 images per split. - There must not be any other files in the zip file. - There must not be any other folders inside the zip folder. When train.zip is decompressed, it creates no folders: only images and metadata.jsonl. ## Parameters [[autodoc]] trainers.object_detection.params.ObjectDetectionParams
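As a sanity check before zipping, `metadata.jsonl` can be generated from in-memory annotations with nothing but the standard library. This is a generic sketch (file names, boxes, and categories are placeholders, not part of AutoTrain) that simply enforces the COCO `[x, y, width, height]` layout described above.

```python
import json

# Placeholder annotations: one record per image, boxes in COCO [x, y, width, height].
annotations = [
    {"file_name": "0001.png", "objects": {"bbox": [[302.0, 109.0, 73.0, 52.0]], "category": [0]}},
    {"file_name": "0002.png", "objects": {"bbox": [[810.0, 100.0, 57.0, 28.0]], "category": [1]}},
]

with open("metadata.jsonl", "w") as f:
    for record in annotations:
        boxes = record["objects"]["bbox"]
        categories = record["objects"]["category"]
        # Every box needs exactly four values and a matching category entry.
        assert all(len(box) == 4 for box in boxes), "bbox must be [x, y, width, height]"
        assert len(boxes) == len(categories), "one category per bounding box"
        f.write(json.dumps(record) + "\n")
```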
8
0
hf_public_repos/autotrain-advanced/docs/source
hf_public_repos/autotrain-advanced/docs/source/tasks/image_classification_regression.mdx
# Image Classification & Regression Image classification is a form of supervised learning where a model is trained to identify and categorize objects within images. AutoTrain simplifies the process, enabling you to train a state-of-the-art image classification model by simply uploading labeled example images. Image regression/scoring is a form of supervised learning where a model is trained to predict a score or value for an image. AutoTrain simplifies the process, enabling you to train a state-of-the-art image scoring model by simply uploading labeled example images. ## Preparing your data To ensure your image classification model trains effectively, follow these guidelines for preparing your data: ### Organizing Images For Image Classification Prepare a zip file containing your categorized images. Each category should have its own subfolder named after the class it represents. For example, to differentiate between 'cats' and 'dogs', your zip file structure should resemble the following: ``` cats_and_dogs.zip ├── cats │ ├── cat.1.jpg │ ├── cat.2.jpg │ ├── cat.3.jpg │ └── ... └── dogs ├── dog.1.jpg ├── dog.2.jpg ├── dog.3.jpg └── ... ``` You can also use a dataset from the Hugging Face Hub. Example dataset from Hugging Face Hub: [truepositive/hotdog_nothotdog](https://huggingface.co/datasets/truepositive/hotdog_nothotdog). ### Organizing Images for Image Regression/Scoring Prepare a zip file containing your images and metadata.jsonl. ``` Archive.zip ├── 0001.png ├── 0002.png ├── 0003.png ├── . ├── . ├── . └── metadata.jsonl ``` Example for `metadata.jsonl`: ``` {"file_name": "0001.png", "target": 0.5} {"file_name": "0002.png", "target": 0.7} {"file_name": "0003.png", "target": 0.3} ``` Please note that metadata.jsonl should contain the `file_name` and the `target` value for each image. You can also use a dataset from the Hugging Face Hub. Example dataset from Hugging Face Hub: [abhishek/img-quality-full](https://huggingface.co/datasets/abhishek/img-quality-full). ### Image Requirements - Format: Ensure all images are in JPEG, JPG, or PNG format. - Quantity: Include at least 5 images per class to provide the model with sufficient examples for learning. - Exclusivity: The zip file should exclusively contain folders named after the classes, and these folders should only contain relevant images. No additional files or nested folders should be included. **Additional Tips** - Uniformity: While not required, having images of similar sizes and resolutions can help improve model performance. - Variability: Include a variety of images for each class to encompass the range of appearances and contexts the model might encounter in real-world scenarios. Some points to keep in mind: - The zip file should contain multiple folders (the classes), each folder should contain images of a single class. - The name of the folder should be the name of the class. - The images must be jpeg, jpg or png. - There should be at least 5 images per class. - There must not be any other files in the zip file. - There must not be any other folders inside the zip folder. When train.zip is decompressed, it creates two folders: cats and dogs. These are the two categories for classification. The images for both categories are in their respective folders. You can have as many categories as you want. ## Column Mapping For image classification, if you are using a `zip` dataset format, the column mapping should be default and should not be changed. ```yaml data: . . . 
column_mapping: image_column: image target_column: label ``` For image regression, the column mapping must be as follows: ```yaml data: . . . column_mapping: image_column: image target_column: target ``` For image regression, `metadata.jsonl` should contain the `file_name` and the `target` value for each image. If you are using a dataset from the Hugging Face Hub, you should set appropriate column mappings based on the dataset. ## Training ### Local Training To train the model locally, create a configuration file (config.yaml) with the following content: ```yaml task: image_classification base_model: google/vit-base-patch16-224 project_name: autotrain-cats-vs-dogs-finetuned log: tensorboard backend: local data: path: cats_vs_dogs train_split: train valid_split: null column_mapping: image_column: image target_column: label params: epochs: 2 batch_size: 4 lr: 2e-5 optimizer: adamw_torch scheduler: linear gradient_accumulation: 1 mixed_precision: fp16 hub: username: ${HF_USERNAME} token: ${HF_TOKEN} push_to_hub: true ``` Here, we are using the `cats_vs_dogs` dataset from the Hugging Face Hub. The model is trained for 2 epochs with a batch size of 4 and a learning rate of `2e-5`. We are using the `adamw_torch` optimizer and the `linear` scheduler. We are also using mixed precision training with a gradient accumulation of 1. In order to use a local dataset, you can change the `data` section to: ```yaml data: path: data/ train_split: train # this folder inside data/ will be used for training, it contains the images in subfolders. valid_split: valid # this folder inside data/ will be used for validation, it contains the images in subfolders. can also be null. column_mapping: image_column: image target_column: label ``` Similarly, for image regression, you can use the following configuration file: ```yaml task: image_regression base_model: microsoft/resnet-50 project_name: autotrain-img-quality-resnet50 log: tensorboard backend: local data: path: abhishek/img-quality-full train_split: train valid_split: null column_mapping: image_column: image target_column: target params: epochs: 10 batch_size: 8 lr: 2e-3 optimizer: adamw_torch scheduler: cosine gradient_accumulation: 1 mixed_precision: fp16 hub: username: ${HF_USERNAME} token: ${HF_TOKEN} push_to_hub: true ``` To train the model, run the following command: ```bash $ autotrain --config config.yaml ``` This will start the training process and save the model to the Hugging Face Hub after training is complete. In case you don't want to save the model to the Hub, you can set `push_to_hub` to `false` in the configuration file. ### Training on Hugging Face Spaces To train the model on Hugging Face Spaces, create a training space as described in the `Quickstart` section. An example UI for training an image scoring model on Hugging Face Spaces is shown below: ![llm-finetuning](https://raw.githubusercontent.com/huggingface/autotrain-advanced/main/static/img_reg_ui.png) In this example, we are training an image scoring model using the `microsoft/resnet-50` model on the `abhishek/img-quality-full` dataset. We are training the model for 3 epochs with a batch size of 8 and a learning rate of `5e-5`. We are using the `adamw_torch` optimizer and the `linear` scheduler. We are also using mixed precision training with a gradient accumulation of 1. Note how the column mapping has now been changed and `target` points to the `quality_mos` column in the dataset. To train the model, click on the `Start Training` button. 
This will start the training process and save the model to the Hugging Face Hub after training is complete. ## Parameters ### Image Classification Parameters [[autodoc]] trainers.image_classification.params.ImageClassificationParams ### Image Regression Parameters [[autodoc]] trainers.image_regression.params.ImageRegressionParams
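For the regression/scoring layout, the archive can be assembled with a few lines of standard-library Python. This is a generic sketch (the folder name and scores are placeholders) shown only to make the expected flat `images + metadata.jsonl` structure concrete; it is not an AutoTrain API.

```python
import json
import os
import zipfile

image_dir = "images"  # placeholder: a flat folder of .jpg/.jpeg/.png files
scores = {"0001.png": 0.5, "0002.png": 0.7, "0003.png": 0.3}  # placeholder targets

# Write metadata.jsonl next to the images: one {"file_name", "target"} object per line.
with open(os.path.join(image_dir, "metadata.jsonl"), "w") as f:
    for file_name, target in scores.items():
        f.write(json.dumps({"file_name": file_name, "target": target}) + "\n")

# Zip everything at the archive root -- no nested folders, no extra files.
with zipfile.ZipFile("train.zip", "w") as zf:
    for name in sorted(os.listdir(image_dir)):
        zf.write(os.path.join(image_dir, name), arcname=name)
```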
9
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/config.py
import os HF_API = os.getenv("HF_API", "https://huggingface.co")
0
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/logging.py
import sys from dataclasses import dataclass from loguru import logger IS_ACCELERATE_AVAILABLE = False try: from accelerate.state import PartialState IS_ACCELERATE_AVAILABLE = True except ImportError: pass @dataclass class Logger: """ A custom logger class that sets up and manages logging configuration. Methods ------- __post_init__(): Initializes the logger with a specific format and sets up the logger. _should_log(record): Determines if a log record should be logged based on the process state. setup_logger(): Configures the logger to output to stdout with the specified format and filter. get_logger(): Returns the configured logger instance. """ def __post_init__(self): self.log_format = ( "<level>{level: <8}</level> | " "<green>{time:YYYY-MM-DD HH:mm:ss}</green> | " "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - " "<level>{message}</level>" ) self.logger = logger self.setup_logger() def _should_log(self, record): if not IS_ACCELERATE_AVAILABLE: return None return PartialState().is_main_process def setup_logger(self): self.logger.remove() self.logger.add( sys.stdout, format=self.log_format, filter=lambda x: self._should_log(x) if IS_ACCELERATE_AVAILABLE else None, ) def get_logger(self): return self.logger
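In practice the class is used the same way the package `__init__` (shown later in this dump) uses it: construct `Logger` once and reuse the returned loguru instance. A minimal sketch:

```python
from autotrain.logging import Logger

# Logger() points loguru at stdout (main process only when accelerate is available);
# get_logger() hands back the configured loguru logger.
logger = Logger().get_logger()
logger.info("starting preprocessing")
logger.warning("no validation split provided, falling back to a holdout split")
```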
1
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/parser.py
import os from dataclasses import dataclass import requests import yaml from autotrain import logger from autotrain.project import ( AutoTrainProject, ext_qa_munge_data, img_clf_munge_data, img_obj_detect_munge_data, img_reg_munge_data, llm_munge_data, sent_transformers_munge_data, seq2seq_munge_data, tabular_munge_data, text_clf_munge_data, text_reg_munge_data, token_clf_munge_data, vlm_munge_data, ) from autotrain.tasks import TASKS from autotrain.trainers.clm.params import LLMTrainingParams from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams from autotrain.trainers.image_classification.params import ImageClassificationParams from autotrain.trainers.image_regression.params import ImageRegressionParams from autotrain.trainers.object_detection.params import ObjectDetectionParams from autotrain.trainers.sent_transformers.params import SentenceTransformersParams from autotrain.trainers.seq2seq.params import Seq2SeqParams from autotrain.trainers.tabular.params import TabularParams from autotrain.trainers.text_classification.params import TextClassificationParams from autotrain.trainers.text_regression.params import TextRegressionParams from autotrain.trainers.token_classification.params import TokenClassificationParams from autotrain.trainers.vlm.params import VLMTrainingParams @dataclass class AutoTrainConfigParser: """ AutoTrainConfigParser is a class responsible for parsing and validating the yaml configuration required to run various tasks in the AutoTrain framework. It supports loading configurations from both local files and remote URLs, and maps task aliases to their respective parameters and data munging functions. Attributes: config_path (str): Path or URL to the configuration file. config (dict): Parsed configuration data. task_param_map (dict): Mapping of task names to their parameter classes. munge_data_map (dict): Mapping of task names to their data munging functions. task_aliases (dict): Mapping of task aliases to their canonical task names. task (str): The resolved task name from the configuration. backend (str): The backend specified in the configuration. parsed_config (dict): The parsed configuration parameters. Methods: __post_init__(): Initializes the parser, loads the configuration, and validates required fields. _parse_config(): Parses the configuration and extracts relevant parameters based on the task. run(): Executes the task with the parsed configuration. 
""" config_path: str def __post_init__(self): if self.config_path.startswith("http"): response = requests.get(self.config_path) if response.status_code == 200: self.config = yaml.safe_load(response.content) else: raise ValueError("Failed to retrieve YAML file.") else: with open(self.config_path, "r") as f: self.config = yaml.safe_load(f) self.task_param_map = { "lm_training": LLMTrainingParams, "image_binary_classification": ImageClassificationParams, "image_multi_class_classification": ImageClassificationParams, "image_object_detection": ObjectDetectionParams, "seq2seq": Seq2SeqParams, "tabular": TabularParams, "text_binary_classification": TextClassificationParams, "text_multi_class_classification": TextClassificationParams, "text_single_column_regression": TextRegressionParams, "text_token_classification": TokenClassificationParams, "sentence_transformers": SentenceTransformersParams, "image_single_column_regression": ImageRegressionParams, "vlm": VLMTrainingParams, "text_extractive_question_answering": ExtractiveQuestionAnsweringParams, } self.munge_data_map = { "lm_training": llm_munge_data, "tabular": tabular_munge_data, "seq2seq": seq2seq_munge_data, "image_multi_class_classification": img_clf_munge_data, "image_object_detection": img_obj_detect_munge_data, "text_multi_class_classification": text_clf_munge_data, "text_token_classification": token_clf_munge_data, "text_single_column_regression": text_reg_munge_data, "sentence_transformers": sent_transformers_munge_data, "image_single_column_regression": img_reg_munge_data, "vlm": vlm_munge_data, "text_extractive_question_answering": ext_qa_munge_data, } self.task_aliases = { "llm": "lm_training", "llm-sft": "lm_training", "llm-orpo": "lm_training", "llm-generic": "lm_training", "llm-dpo": "lm_training", "llm-reward": "lm_training", "image_binary_classification": "image_multi_class_classification", "image-binary-classification": "image_multi_class_classification", "image_classification": "image_multi_class_classification", "image-classification": "image_multi_class_classification", "seq2seq": "seq2seq", "tabular": "tabular", "text_binary_classification": "text_multi_class_classification", "text-binary-classification": "text_multi_class_classification", "text_classification": "text_multi_class_classification", "text-classification": "text_multi_class_classification", "text_single_column_regression": "text_single_column_regression", "text-single-column-regression": "text_single_column_regression", "text_regression": "text_single_column_regression", "text-regression": "text_single_column_regression", "token_classification": "text_token_classification", "token-classification": "text_token_classification", "image_object_detection": "image_object_detection", "image-object-detection": "image_object_detection", "object_detection": "image_object_detection", "object-detection": "image_object_detection", "st": "sentence_transformers", "st:pair": "sentence_transformers", "st:pair_class": "sentence_transformers", "st:pair_score": "sentence_transformers", "st:triplet": "sentence_transformers", "st:qa": "sentence_transformers", "sentence-transformers:pair": "sentence_transformers", "sentence-transformers:pair_class": "sentence_transformers", "sentence-transformers:pair_score": "sentence_transformers", "sentence-transformers:triplet": "sentence_transformers", "sentence-transformers:qa": "sentence_transformers", "image_single_column_regression": "image_single_column_regression", "image-single-column-regression": "image_single_column_regression", 
"image_regression": "image_single_column_regression", "image-regression": "image_single_column_regression", "image-scoring": "image_single_column_regression", "vlm:captioning": "vlm", "vlm:vqa": "vlm", "extractive_question_answering": "text_extractive_question_answering", "ext_qa": "text_extractive_question_answering", "ext-qa": "text_extractive_question_answering", "extractive-qa": "text_extractive_question_answering", } task = self.config.get("task") self.task = self.task_aliases.get(task, task) if self.task is None: raise ValueError("Task is required in the configuration file") if self.task not in TASKS: raise ValueError(f"Task `{self.task}` is not supported") self.backend = self.config.get("backend") if self.backend is None: raise ValueError("Backend is required in the configuration file") logger.info(f"Running task: {self.task}") logger.info(f"Using backend: {self.backend}") self.parsed_config = self._parse_config() def _parse_config(self): params = { "model": self.config["base_model"], "project_name": self.config["project_name"], } params["data_path"] = self.config["data"]["path"] if self.task == "lm_training": params["chat_template"] = self.config["data"]["chat_template"] if "-" in self.config["task"]: params["trainer"] = self.config["task"].split("-")[1] if params["trainer"] == "generic": params["trainer"] = "default" if params["trainer"] not in ["sft", "orpo", "dpo", "reward", "default"]: raise ValueError("Invalid LLM training task") if self.task == "sentence_transformers": params["trainer"] = self.config["task"].split(":")[1] if self.task == "vlm": params["trainer"] = self.config["task"].split(":")[1] for k, v in self.config["data"]["column_mapping"].items(): params[k] = v params["train_split"] = self.config["data"]["train_split"] params["valid_split"] = self.config["data"]["valid_split"] params["log"] = self.config["log"] if "hub" in self.config: params["username"] = self.config["hub"]["username"] params["token"] = self.config["hub"]["token"] params["push_to_hub"] = self.config["hub"]["push_to_hub"] else: params["username"] = None params["token"] = None params["push_to_hub"] = False if params["username"]: if params["username"].startswith("${"): params["username"] = os.environ.get(params["username"][2:-1]) if params["token"]: if params["token"].startswith("${"): params["token"] = os.environ.get(params["token"][2:-1]) other_params = self.config.get("params") if other_params: params.update(other_params) return params def run(self): _params = self.task_param_map[self.task](**self.parsed_config) logger.info(_params) _munge_fn = self.munge_data_map[self.task] _munge_fn(_params, local=self.backend.startswith("local")) project = AutoTrainProject(params=_params, backend=self.backend) job_id = project.create() logger.info(f"Job ID: {job_id}")
2
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/help.py
autotrain_user_info = """ <p>Please choose the user or organization who is creating the AutoTrain Project.</p> <p>In case of non-free tier, this user or organization will be billed.</p> """ project_name_info = """A unique name for the AutoTrain Project. This name will be used to identify the project in the AutoTrain dashboard.""" column_mapping_info = """ <p>Column Mapping is used to map the columns in the dataset to the columns in the AutoTrain Project.</p> <p>For example, if your dataset has a column named "input" and you want to use it as the input for the model, you can map it to the "text" column in the AutoTrain Project.</p> <p>Similarly, if your dataset has a column named "label" and you want to use it as the label for the model, you can map it to the "target" column in the AutoTrain Project.</p> <p>Column mapping keys are AutoTrain Project column names and values are your dataset column names.</p> <p>For tabular datasets, you can map multiple targets to the "label" column. This will enable multi-label task. The column names must be a comma separated list.</p> <p>For other tasks, mappings are one-to-one.</p> <p>Note: column names are case sensitive.</p> """ base_model_info = """ <p>Base Model is the model that will be used for fine-tuning.</p> <p>For example, if you are training a text classification model, you can choose a base model like "bert-base-uncased".</p> <p>For a list of available models, please see <a href="https://huggingface.co/models" target="_blank">HuggingFace Model Hub</a>.</p> <p>Note: not all models listed here are going to be compatible with your data and parameters. You should select a model that is compatible with your task, data and parameters.</p> Dont see your favorite model? You can also use a custom model by providing the model name in an environment variable: AUTOTRAIN_CUSTOM_MODELS. For example, go to settings and add a new environment variable with the key AUTOTRAIN_CUSTOM_MODELS and value as the model name (e.g. google/gemma-7b) """ hardware_info = """ <p>Hardware is the machine that will be used for training.</p> <p>Please choose a hardware that is compatible with your task, data and parameters.</p> """ task_info = """ <p>Task is the type of model you want to train.</p> <p>Please choose a task that is compatible with your data and parameters.</p> <p>For example, if you are training a text classification model, you can choose "Text Classification" task.</p> """ APP_IMAGE_CLASSIFICATION_DATA_HELP = """The data for the Image Classification task should be in the following format: - The data should be in a zip file. - The zip file should contain multiple folders (the classes), each folder should contain images of a single class. - The name of the folder should be the name of the class. - The images must be jpeg, jpg or png. - There should be at least 5 images per class. - There should not be any other files in the zip file. - There should not be any other folders inside the zip folder. """ APP_LM_TRAINING_TYPE = """There are two types of Language Model Training: - generic - chat In the generic mode, you provide a CSV with a text column which has already been formatted by you for training a language model. In the chat mode, you provide a CSV with two or three text columns: prompt, context (optional) and response. Context column can be empty for samples if not needed. You can also have a "prompt start" column. If provided, "prompt start" will be prepended before the prompt column. 
Please see [this](https://huggingface.co/datasets/tatsu-lab/alpaca) dataset which has both formats in the same dataset. """ def get_app_help(element_id): if element_id == "autotrain_user_info": return autotrain_user_info elif element_id == "project_name_info": return project_name_info elif element_id == "column_mapping_info": return column_mapping_info elif element_id == "base_model_info": return base_model_info elif element_id == "hardware_info": return hardware_info elif element_id == "task_info": return task_info else: return "No help available for this element."
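Usage is a straight lookup keyed on the UI element id, with a fallback message for anything unknown. A trivial sketch:

```python
from autotrain.help import get_app_help

print(get_app_help("base_model_info"))  # HTML help snippet for the base-model field
print(get_app_help("does_not_exist"))   # "No help available for this element."
```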
3
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/client.py
import os from dataclasses import dataclass from typing import Optional import requests from autotrain import logger AUTOTRAIN_API = os.environ.get("AUTOTRAIN_API", "https://autotrain-projects-autotrain-advanced.hf.space/") BACKENDS = { "spaces-a10g-large": "a10g-large", "spaces-a10g-small": "a10g-small", "spaces-a100-large": "a100-large", "spaces-t4-medium": "t4-medium", "spaces-t4-small": "t4-small", "spaces-cpu-upgrade": "cpu-upgrade", "spaces-cpu-basic": "cpu-basic", "spaces-l4x1": "l4x1", "spaces-l4x4": "l4x4", "spaces-l40sx1": "l40sx1", "spaces-l40sx4": "l40sx4", "spaces-l40sx8": "l40sx8", "spaces-a10g-largex2": "a10g-largex2", "spaces-a10g-largex4": "a10g-largex4", } PARAMS = {} PARAMS["llm"] = { "target_modules": "all-linear", "log": "tensorboard", "mixed_precision": "fp16", "quantization": "int4", "peft": True, "block_size": 1024, "epochs": 3, "padding": "right", "chat_template": "none", "max_completion_length": 128, "distributed_backend": "ddp", "scheduler": "linear", "merge_adapter": True, } PARAMS["text-classification"] = { "mixed_precision": "fp16", "log": "tensorboard", } PARAMS["st"] = { "mixed_precision": "fp16", "log": "tensorboard", } PARAMS["image-classification"] = { "mixed_precision": "fp16", "log": "tensorboard", } PARAMS["image-object-detection"] = { "mixed_precision": "fp16", "log": "tensorboard", } PARAMS["seq2seq"] = { "mixed_precision": "fp16", "target_modules": "all-linear", "log": "tensorboard", } PARAMS["tabular"] = { "categorical_imputer": "most_frequent", "numerical_imputer": "median", "numeric_scaler": "robust", } PARAMS["token-classification"] = { "mixed_precision": "fp16", "log": "tensorboard", } PARAMS["text-regression"] = { "mixed_precision": "fp16", "log": "tensorboard", } PARAMS["image-regression"] = { "mixed_precision": "fp16", "log": "tensorboard", } PARAMS["vlm"] = { "mixed_precision": "fp16", "target_modules": "all-linear", "log": "tensorboard", "quantization": "int4", "peft": True, "epochs": 3, } PARAMS["extractive-qa"] = { "mixed_precision": "fp16", "log": "tensorboard", "max_seq_length": 512, "max_doc_stride": 128, } DEFAULT_COLUMN_MAPPING = {} DEFAULT_COLUMN_MAPPING["llm:sft"] = {"text_column": "text"} DEFAULT_COLUMN_MAPPING["llm:generic"] = {"text_column": "text"} DEFAULT_COLUMN_MAPPING["llm:default"] = {"text_column": "text"} DEFAULT_COLUMN_MAPPING["llm:dpo"] = { "prompt_column": "prompt", "text_column": "chosen", "rejected_text_column": "rejected", } DEFAULT_COLUMN_MAPPING["llm:orpo"] = { "prompt_column": "prompt", "text_column": "chosen", "rejected_text_column": "rejected", } DEFAULT_COLUMN_MAPPING["llm:reward"] = {"text_column": "chosen", "rejected_text_column": "rejected"} DEFAULT_COLUMN_MAPPING["vlm:captioning"] = {"image_column": "image", "text_column": "caption"} DEFAULT_COLUMN_MAPPING["vlm:vqa"] = { "image_column": "image", "prompt_text_column": "question", "text_column": "answer", } DEFAULT_COLUMN_MAPPING["st:pair"] = {"sentence1": "anchor", "sentence2": "positive"} DEFAULT_COLUMN_MAPPING["st:pair_class"] = { "sentence1_column": "premise", "sentence2_column": "hypothesis", "target_column": "label", } DEFAULT_COLUMN_MAPPING["st:pair_score"] = { "sentence1_column": "sentence1", "sentence2_column": "sentence2", "target_column": "score", } DEFAULT_COLUMN_MAPPING["st:triplet"] = { "sentence1_column": "anchor", "sentence2_column": "positive", "sentence3_column": "negative", } DEFAULT_COLUMN_MAPPING["st:qa"] = {"sentence1_column": "query", "sentence2_column": "answer"} DEFAULT_COLUMN_MAPPING["text-classification"] = {"text_column": "text", 
"target_column": "target"} DEFAULT_COLUMN_MAPPING["seq2seq"] = {"text_column": "text", "target_column": "target"} DEFAULT_COLUMN_MAPPING["text-regression"] = {"text_column": "text", "target_column": "target"} DEFAULT_COLUMN_MAPPING["token-classification"] = {"text_column": "tokens", "target_column": "tags"} DEFAULT_COLUMN_MAPPING["image-classification"] = {"image_column": "image", "target_column": "label"} DEFAULT_COLUMN_MAPPING["image-regression"] = {"image_column": "image", "target_column": "target"} DEFAULT_COLUMN_MAPPING["image-object-detection"] = {"image_column": "image", "objects_column": "objects"} DEFAULT_COLUMN_MAPPING["tabular:classification"] = {"id_column": "id", "target__columns": ["target"]} DEFAULT_COLUMN_MAPPING["tabular:regression"] = {"id_column": "id", "target_columns": ["target"]} DEFAULT_COLUMN_MAPPING["extractive-qa"] = { "text_column": "context", "question_column": "question", "answer_column": "answers", } VALID_TASKS = [k for k in DEFAULT_COLUMN_MAPPING.keys()] @dataclass class Client: """ A client to interact with the AutoTrain API. Attributes: host (Optional[str]): The host URL for the AutoTrain API. token (Optional[str]): The authentication token for the API. username (Optional[str]): The username for the API. Methods: __post_init__(): Initializes the client with default values if not provided and sets up headers. __str__(): Returns a string representation of the client with masked token. __repr__(): Returns a string representation of the client with masked token. create(project_name: str, task: str, base_model: str, hardware: str, dataset: str, train_split: str, column_mapping: Optional[dict] = None, params: Optional[dict] = None, valid_split: Optional[str] = None): Creates a new project on the AutoTrain platform. get_logs(job_id: str): Retrieves logs for a given job ID. stop_training(job_id: str): Stops the training for a given job ID. """ host: Optional[str] = None token: Optional[str] = None username: Optional[str] = None def __post_init__(self): if self.host is None: self.host = AUTOTRAIN_API if self.token is None: self.token = os.environ.get("HF_TOKEN") if self.username is None: self.username = os.environ.get("HF_USERNAME") if self.token is None or self.username is None: raise ValueError("Please provide a valid username and token") self.headers = {"Authorization": f"Bearer {self.token}", "Content-Type": "application/json"} def __str__(self): return f"Client(host={self.host}, token=****, username={self.username})" def __repr__(self): return self.__str__() def create( self, project_name: str, task: str, base_model: str, backend: str, dataset: str, train_split: str, column_mapping: Optional[dict] = None, params: Optional[dict] = None, valid_split: Optional[str] = None, ): if task not in VALID_TASKS: raise ValueError(f"Invalid task. Valid tasks are: {VALID_TASKS}") if backend not in BACKENDS: raise ValueError(f"Invalid backend. 
Valid backends are: {list(BACKENDS.keys())}") url = f"{self.host}/api/create_project" if task == "llm:default": task = "llm:generic" if params is None: params = {} if task.startswith("llm"): params = {k: v for k, v in PARAMS["llm"].items() if k not in params} elif task.startswith("st"): params = {k: v for k, v in PARAMS["st"].items() if k not in params} else: params = {k: v for k, v in PARAMS[task].items() if k not in params} if column_mapping is None: column_mapping = DEFAULT_COLUMN_MAPPING[task] # check if column_mapping is valid for the task default_col_map = DEFAULT_COLUMN_MAPPING[task] missing_cols = [] for k, _ in default_col_map.items(): if k not in column_mapping.keys(): missing_cols.append(k) if missing_cols: raise ValueError(f"Missing columns in column_mapping: {missing_cols}") data = { "project_name": project_name, "task": task, "base_model": base_model, "hardware": backend, "params": params, "username": self.username, "column_mapping": column_mapping, "hub_dataset": dataset, "train_split": train_split, "valid_split": valid_split, } response = requests.post(url, headers=self.headers, json=data) if response.status_code == 200: resp = response.json() logger.info( f"Project created successfully. Job ID: {resp['job_id']}. View logs at: https://hf.co/spaces/{resp['job_id']}" ) return resp else: logger.error(f"Error creating project: {response.json()}") return response.json() def get_logs(self, job_id: str): url = f"{self.host}/api/logs" data = {"jid": job_id} response = requests.post(url, headers=self.headers, json=data) return response.json() def stop_training(self, job_id: str): url = f"{self.host}/api/stop_training/{job_id}" data = {"jid": job_id} response = requests.post(url, headers=self.headers, json=data) return response.json()
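Putting it together, a job can be submitted to the hosted AutoTrain API roughly as follows. This is a hedged sketch: the project name and hyperparameters are placeholders, it assumes `HF_TOKEN` and `HF_USERNAME` are exported (otherwise pass `token=`/`username=` explicitly), and the chosen `backend` must be one of the `BACKENDS` keys above.

```python
from autotrain.client import Client

client = Client()  # reads HF_TOKEN and HF_USERNAME from the environment

resp = client.create(
    project_name="my-llm-sft-run",                   # placeholder
    task="llm:sft",
    base_model="meta-llama/Llama-3.2-1B-Instruct",
    backend="spaces-a10g-small",                     # must be a key of BACKENDS
    dataset="HuggingFaceH4/no_robots",
    train_split="train",
    column_mapping={"text_column": "messages"},      # must cover DEFAULT_COLUMN_MAPPING["llm:sft"]
    params={"chat_template": "tokenizer", "epochs": 1},
)

logs = client.get_logs(resp["job_id"])  # poll logs for the returned job id
```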
4
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/__init__.py
# coding=utf-8 # Copyright 2020-2023 The HuggingFace AutoTrain Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 # pylint: enable=line-too-long import os os.environ["BITSANDBYTES_NOWELCOME"] = "1" os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" os.environ["TOKENIZERS_PARALLELISM"] = "false" import warnings try: import torch._dynamo torch._dynamo.config.suppress_errors = True except ImportError: pass from autotrain.logging import Logger warnings.filterwarnings("ignore", category=UserWarning, module="tensorflow") warnings.filterwarnings("ignore", category=UserWarning, module="transformers") warnings.filterwarnings("ignore", category=UserWarning, module="peft") warnings.filterwarnings("ignore", category=UserWarning, module="accelerate") warnings.filterwarnings("ignore", category=UserWarning, module="datasets") warnings.filterwarnings("ignore", category=FutureWarning, module="accelerate") warnings.filterwarnings("ignore", category=UserWarning, module="huggingface_hub") logger = Logger().get_logger() __version__ = "0.8.34.dev0" def is_colab(): try: import google.colab return True except ImportError: return False def is_unsloth_available(): try: from unsloth import FastLanguageModel return True except Exception as e: logger.warning("Unsloth not available, continuing without it") logger.warning(e) return False
5
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/dataset.py
import io import os import uuid import zipfile from dataclasses import dataclass from typing import Dict, List, Optional import pandas as pd from autotrain.preprocessor.tabular import ( TabularBinaryClassificationPreprocessor, TabularMultiClassClassificationPreprocessor, TabularMultiColumnRegressionPreprocessor, TabularMultiLabelClassificationPreprocessor, TabularSingleColumnRegressionPreprocessor, ) from autotrain.preprocessor.text import ( LLMPreprocessor, SentenceTransformersPreprocessor, Seq2SeqPreprocessor, TextBinaryClassificationPreprocessor, TextExtractiveQuestionAnsweringPreprocessor, TextMultiClassClassificationPreprocessor, TextSingleColumnRegressionPreprocessor, TextTokenClassificationPreprocessor, ) from autotrain.preprocessor.vision import ( ImageClassificationPreprocessor, ImageRegressionPreprocessor, ObjectDetectionPreprocessor, ) from autotrain.preprocessor.vlm import VLMPreprocessor def remove_non_image_files(folder): """ Remove non-image files from a specified folder and its subfolders. This function iterates through all files in the given folder and its subfolders, and removes any file that does not have an allowed image file extension. The allowed extensions are: .jpg, .jpeg, .png, .JPG, .JPEG, .PNG, and .jsonl. Args: folder (str): The path to the folder from which non-image files should be removed. Returns: None """ # Define allowed image file extensions allowed_extensions = {".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG", ".jsonl"} # Iterate through all files in the folder for root, dirs, files in os.walk(folder): for file in files: # Get the file extension file_extension = os.path.splitext(file)[1] # If the file extension is not in the allowed list, remove the file if file_extension.lower() not in allowed_extensions: file_path = os.path.join(root, file) os.remove(file_path) print(f"Removed file: {file_path}") # Recursively call the function on each subfolder for subfolder in dirs: remove_non_image_files(os.path.join(root, subfolder)) @dataclass class AutoTrainImageClassificationDataset: """ A class to handle image classification datasets for AutoTrain. Attributes: train_data (str): Path to the training data. token (str): Authentication token. project_name (str): Name of the project. username (str): Username of the project owner. valid_data (Optional[str]): Path to the validation data. Default is None. percent_valid (Optional[float]): Percentage of training data to use for validation. Default is None. local (bool): Flag to indicate if the data is local. Default is False. Methods: __str__() -> str: Returns a string representation of the dataset. __post_init__(): Initializes the dataset and sets default values for validation data. prepare(): Prepares the dataset for training by extracting and preprocessing the data. 
""" train_data: str token: str project_name: str username: str valid_data: Optional[str] = None percent_valid: Optional[float] = None local: bool = False def __str__(self) -> str: info = f"Dataset: {self.project_name} ({self.task})\n" info += f"Train data: {self.train_data}\n" info += f"Valid data: {self.valid_data}\n" return info def __post_init__(self): self.task = "image_multi_class_classification" if not self.valid_data and self.percent_valid is None: self.percent_valid = 0.2 elif self.valid_data and self.percent_valid is not None: raise ValueError("You can only specify one of valid_data or percent_valid") elif self.valid_data: self.percent_valid = 0.0 def prepare(self): valid_dir = None if not isinstance(self.train_data, str): cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") random_uuid = uuid.uuid4() train_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(train_dir, exist_ok=True) self.train_data.seek(0) content = self.train_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(train_dir) # remove the __MACOSX directory macosx_dir = os.path.join(train_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(train_dir) if self.valid_data: random_uuid = uuid.uuid4() valid_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(valid_dir, exist_ok=True) self.valid_data.seek(0) content = self.valid_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(valid_dir) # remove the __MACOSX directory macosx_dir = os.path.join(valid_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(valid_dir) else: train_dir = self.train_data if self.valid_data: valid_dir = self.valid_data preprocessor = ImageClassificationPreprocessor( train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local, ) return preprocessor.prepare() @dataclass class AutoTrainObjectDetectionDataset: """ A dataset class for AutoTrain object detection tasks. Attributes: train_data (str): Path to the training data. token (str): Authentication token. project_name (str): Name of the project. username (str): Username of the project owner. valid_data (Optional[str]): Path to the validation data. Default is None. percent_valid (Optional[float]): Percentage of training data to be used for validation. Default is None. local (bool): Flag indicating if the data is local. Default is False. Methods: __str__() -> str: Returns a string representation of the dataset. __post_init__(): Initializes the dataset and sets default values for validation data. prepare(): Prepares the dataset for training by extracting and preprocessing the data. 
""" train_data: str token: str project_name: str username: str valid_data: Optional[str] = None percent_valid: Optional[float] = None local: bool = False def __str__(self) -> str: info = f"Dataset: {self.project_name} ({self.task})\n" info += f"Train data: {self.train_data}\n" info += f"Valid data: {self.valid_data}\n" return info def __post_init__(self): self.task = "image_object_detection" if not self.valid_data and self.percent_valid is None: self.percent_valid = 0.2 elif self.valid_data and self.percent_valid is not None: raise ValueError("You can only specify one of valid_data or percent_valid") elif self.valid_data: self.percent_valid = 0.0 def prepare(self): valid_dir = None if not isinstance(self.train_data, str): cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") random_uuid = uuid.uuid4() train_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(train_dir, exist_ok=True) self.train_data.seek(0) content = self.train_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(train_dir) # remove the __MACOSX directory macosx_dir = os.path.join(train_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(train_dir) if self.valid_data: random_uuid = uuid.uuid4() valid_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(valid_dir, exist_ok=True) self.valid_data.seek(0) content = self.valid_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(valid_dir) # remove the __MACOSX directory macosx_dir = os.path.join(valid_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(valid_dir) else: train_dir = self.train_data if self.valid_data: valid_dir = self.valid_data preprocessor = ObjectDetectionPreprocessor( train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local, ) return preprocessor.prepare() @dataclass class AutoTrainVLMDataset: """ A class to handle dataset for AutoTrain Vision-Language Model (VLM) task. Attributes: ----------- train_data : str Path to the training data or a file-like object containing the training data. token : str Authentication token for accessing the dataset. project_name : str Name of the project. username : str Username of the project owner. column_mapping : Dict[str, str] Mapping of columns in the dataset. valid_data : Optional[str], default=None Path to the validation data or a file-like object containing the validation data. percent_valid : Optional[float], default=None Percentage of the training data to be used for validation if `valid_data` is not provided. local : bool, default=False Flag indicating whether the dataset is stored locally. Methods: -------- __str__() -> str: Returns a string representation of the dataset. __post_init__(): Initializes the dataset and sets default values for validation data percentage. prepare(): Prepares the dataset for training by extracting and processing the data. 
""" train_data: str token: str project_name: str username: str column_mapping: Dict[str, str] valid_data: Optional[str] = None percent_valid: Optional[float] = None local: bool = False def __str__(self) -> str: info = f"Dataset: {self.project_name} ({self.task})\n" info += f"Train data: {self.train_data}\n" info += f"Valid data: {self.valid_data}\n" return info def __post_init__(self): self.task = "vlm" if not self.valid_data and self.percent_valid is None: self.percent_valid = 0.2 elif self.valid_data and self.percent_valid is not None: raise ValueError("You can only specify one of valid_data or percent_valid") elif self.valid_data: self.percent_valid = 0.0 def prepare(self): valid_dir = None if not isinstance(self.train_data, str): cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") random_uuid = uuid.uuid4() train_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(train_dir, exist_ok=True) self.train_data.seek(0) content = self.train_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(train_dir) # remove the __MACOSX directory macosx_dir = os.path.join(train_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(train_dir) if self.valid_data: random_uuid = uuid.uuid4() valid_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(valid_dir, exist_ok=True) self.valid_data.seek(0) content = self.valid_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(valid_dir) # remove the __MACOSX directory macosx_dir = os.path.join(valid_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(valid_dir) else: train_dir = self.train_data if self.valid_data: valid_dir = self.valid_data preprocessor = VLMPreprocessor( train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local, column_mapping=self.column_mapping, ) return preprocessor.prepare() @dataclass class AutoTrainImageRegressionDataset: """ AutoTrainImageRegressionDataset is a class designed for handling image regression datasets in the AutoTrain framework. Attributes: train_data (str): Path to the training data. token (str): Authentication token. project_name (str): Name of the project. username (str): Username of the project owner. valid_data (Optional[str]): Path to the validation data. Default is None. percent_valid (Optional[float]): Percentage of training data to be used for validation if valid_data is not provided. Default is None. local (bool): Flag indicating if the data is local. Default is False. Methods: __str__() -> str: Returns a string representation of the dataset information. __post_init__(): Initializes the task attribute and sets the percent_valid attribute based on the presence of valid_data. prepare(): Prepares the dataset for training by extracting and organizing the data, and returns a preprocessor object. 
""" train_data: str token: str project_name: str username: str valid_data: Optional[str] = None percent_valid: Optional[float] = None local: bool = False def __str__(self) -> str: info = f"Dataset: {self.project_name} ({self.task})\n" info += f"Train data: {self.train_data}\n" info += f"Valid data: {self.valid_data}\n" return info def __post_init__(self): self.task = "image_single_column_regression" if not self.valid_data and self.percent_valid is None: self.percent_valid = 0.2 elif self.valid_data and self.percent_valid is not None: raise ValueError("You can only specify one of valid_data or percent_valid") elif self.valid_data: self.percent_valid = 0.0 def prepare(self): valid_dir = None if not isinstance(self.train_data, str): cache_dir = os.environ.get("HF_HOME") if not cache_dir: cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "huggingface") random_uuid = uuid.uuid4() train_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(train_dir, exist_ok=True) self.train_data.seek(0) content = self.train_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(train_dir) # remove the __MACOSX directory macosx_dir = os.path.join(train_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(train_dir) if self.valid_data: random_uuid = uuid.uuid4() valid_dir = os.path.join(cache_dir, "autotrain", str(random_uuid)) os.makedirs(valid_dir, exist_ok=True) self.valid_data.seek(0) content = self.valid_data.read() bytes_io = io.BytesIO(content) zip_ref = zipfile.ZipFile(bytes_io, "r") zip_ref.extractall(valid_dir) # remove the __MACOSX directory macosx_dir = os.path.join(valid_dir, "__MACOSX") if os.path.exists(macosx_dir): os.system(f"rm -rf {macosx_dir}") remove_non_image_files(valid_dir) else: train_dir = self.train_data if self.valid_data: valid_dir = self.valid_data preprocessor = ImageRegressionPreprocessor( train_data=train_dir, valid_data=valid_dir, token=self.token, project_name=self.project_name, username=self.username, local=self.local, ) return preprocessor.prepare() @dataclass class AutoTrainDataset: """ AutoTrainDataset class for handling various types of datasets and preprocessing tasks. Attributes: train_data (List[str]): List of file paths or DataFrames for training data. task (str): The type of task to perform (e.g., "text_binary_classification"). token (str): Authentication token. project_name (str): Name of the project. username (Optional[str]): Username of the project owner. Defaults to None. column_mapping (Optional[Dict[str, str]]): Mapping of column names. Defaults to None. valid_data (Optional[List[str]]): List of file paths or DataFrames for validation data. Defaults to None. percent_valid (Optional[float]): Percentage of training data to use for validation. Defaults to None. convert_to_class_label (Optional[bool]): Whether to convert labels to class labels. Defaults to False. local (bool): Whether the data is local. Defaults to False. ext (Optional[str]): File extension of the data files. Defaults to "csv". Methods: __str__(): Returns a string representation of the dataset. __post_init__(): Initializes validation data and preprocesses the data. _preprocess_data(): Preprocesses the training and validation data. num_samples(): Returns the total number of samples in the dataset. prepare(): Prepares the dataset for the specified task using the appropriate preprocessor. 
""" train_data: List[str] task: str token: str project_name: str username: Optional[str] = None column_mapping: Optional[Dict[str, str]] = None valid_data: Optional[List[str]] = None percent_valid: Optional[float] = None convert_to_class_label: Optional[bool] = False local: bool = False ext: Optional[str] = "csv" def __str__(self) -> str: info = f"Dataset: {self.project_name} ({self.task})\n" info += f"Train data: {self.train_data}\n" info += f"Valid data: {self.valid_data}\n" info += f"Column mapping: {self.column_mapping}\n" return info def __post_init__(self): if self.valid_data is None: self.valid_data = [] if not self.valid_data and self.percent_valid is None: self.percent_valid = 0.2 elif self.valid_data and self.percent_valid is not None: raise ValueError("You can only specify one of valid_data or percent_valid") elif self.valid_data: self.percent_valid = 0.0 self.train_df, self.valid_df = self._preprocess_data() def _preprocess_data(self): train_df = [] for file in self.train_data: if isinstance(file, pd.DataFrame): train_df.append(file) else: if self.ext == "jsonl": train_df.append(pd.read_json(file, lines=True)) else: train_df.append(pd.read_csv(file)) if len(train_df) > 1: train_df = pd.concat(train_df) else: train_df = train_df[0] valid_df = None if len(self.valid_data) > 0: valid_df = [] for file in self.valid_data: if isinstance(file, pd.DataFrame): valid_df.append(file) else: if self.ext == "jsonl": valid_df.append(pd.read_json(file, lines=True)) else: valid_df.append(pd.read_csv(file)) if len(valid_df) > 1: valid_df = pd.concat(valid_df) else: valid_df = valid_df[0] return train_df, valid_df @property def num_samples(self): return len(self.train_df) + len(self.valid_df) if self.valid_df is not None else len(self.train_df) def prepare(self): if self.task == "text_binary_classification": text_column = self.column_mapping["text"] label_column = self.column_mapping["label"] preprocessor = TextBinaryClassificationPreprocessor( train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, convert_to_class_label=self.convert_to_class_label, local=self.local, ) return preprocessor.prepare() elif self.task == "text_multi_class_classification": text_column = self.column_mapping["text"] label_column = self.column_mapping["label"] preprocessor = TextMultiClassClassificationPreprocessor( train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, convert_to_class_label=self.convert_to_class_label, local=self.local, ) return preprocessor.prepare() elif self.task == "text_token_classification": text_column = self.column_mapping["text"] label_column = self.column_mapping["label"] preprocessor = TextTokenClassificationPreprocessor( train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, convert_to_class_label=self.convert_to_class_label, ) return preprocessor.prepare() elif self.task == "text_single_column_regression": text_column = self.column_mapping["text"] label_column = self.column_mapping["label"] preprocessor = TextSingleColumnRegressionPreprocessor( train_data=self.train_df, text_column=text_column, 
label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "seq2seq": text_column = self.column_mapping["text"] label_column = self.column_mapping["label"] preprocessor = Seq2SeqPreprocessor( train_data=self.train_df, text_column=text_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "lm_training": text_column = self.column_mapping["text"] prompt_column = self.column_mapping.get("prompt") rejected_text_column = self.column_mapping.get("rejected_text") preprocessor = LLMPreprocessor( train_data=self.train_df, text_column=text_column, prompt_column=prompt_column, rejected_text_column=rejected_text_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "sentence_transformers": sentence1_column = self.column_mapping["sentence1"] sentence2_column = self.column_mapping["sentence2"] sentence3_column = self.column_mapping.get("sentence3") target_column = self.column_mapping.get("target") preprocessor = SentenceTransformersPreprocessor( train_data=self.train_df, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, sentence1_column=sentence1_column, sentence2_column=sentence2_column, sentence3_column=sentence3_column, target_column=target_column, convert_to_class_label=self.convert_to_class_label, ) return preprocessor.prepare() elif self.task == "text_extractive_question_answering": text_column = self.column_mapping["text"] question_column = self.column_mapping["question"] answer_column = self.column_mapping["answer"] preprocessor = TextExtractiveQuestionAnsweringPreprocessor( train_data=self.train_df, text_column=text_column, question_column=question_column, answer_column=answer_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "tabular_binary_classification": id_column = self.column_mapping["id"] label_column = self.column_mapping["label"][0] if len(id_column.strip()) == 0: id_column = None preprocessor = TabularBinaryClassificationPreprocessor( train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "tabular_multi_class_classification": id_column = self.column_mapping["id"] label_column = self.column_mapping["label"][0] if len(id_column.strip()) == 0: id_column = None preprocessor = TabularMultiClassClassificationPreprocessor( train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "tabular_single_column_regression": id_column = self.column_mapping["id"] label_column = 
self.column_mapping["label"][0] if len(id_column.strip()) == 0: id_column = None preprocessor = TabularSingleColumnRegressionPreprocessor( train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "tabular_multi_column_regression": id_column = self.column_mapping["id"] label_column = self.column_mapping["label"] if len(id_column.strip()) == 0: id_column = None preprocessor = TabularMultiColumnRegressionPreprocessor( train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() elif self.task == "tabular_multi_label_classification": id_column = self.column_mapping["id"] label_column = self.column_mapping["label"] if len(id_column.strip()) == 0: id_column = None preprocessor = TabularMultiLabelClassificationPreprocessor( train_data=self.train_df, id_column=id_column, label_column=label_column, username=self.username, project_name=self.project_name, valid_data=self.valid_df, test_size=self.percent_valid, token=self.token, seed=42, local=self.local, ) return preprocessor.prepare() else: raise ValueError(f"Task {self.task} not supported")
6
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/tasks.py
NLP_TASKS = { "text_binary_classification": 1, "text_multi_class_classification": 2, "text_token_classification": 4, "text_extractive_question_answering": 5, "text_summarization": 8, "text_single_column_regression": 10, "speech_recognition": 11, "natural_language_inference": 22, "lm_training": 9, "seq2seq": 28, # 27 is reserved for generic training "sentence_transformers": 30, "vlm": 31, } VISION_TASKS = { "image_binary_classification": 17, "image_multi_class_classification": 18, "image_single_column_regression": 24, "image_object_detection": 29, } TABULAR_TASKS = { "tabular_binary_classification": 13, "tabular_multi_class_classification": 14, "tabular_multi_label_classification": 15, "tabular_single_column_regression": 16, "tabular": 26, } TASKS = { **NLP_TASKS, **VISION_TASKS, **TABULAR_TASKS, }
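Since these are plain name-to-id dictionaries merged into `TASKS`, resolving a task id is a direct lookup. A short sketch, with the values read off the mappings above:

```python
from autotrain.tasks import TASKS

print(TASKS["lm_training"])             # 9
print(TASKS["image_object_detection"])  # 29
print("tabular_regression" in TASKS)    # False -- unsupported names are simply absent
```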
7
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/commands.py
import os import shlex import torch from autotrain import logger from autotrain.trainers.clm.params import LLMTrainingParams from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams from autotrain.trainers.generic.params import GenericParams from autotrain.trainers.image_classification.params import ImageClassificationParams from autotrain.trainers.image_regression.params import ImageRegressionParams from autotrain.trainers.object_detection.params import ObjectDetectionParams from autotrain.trainers.sent_transformers.params import SentenceTransformersParams from autotrain.trainers.seq2seq.params import Seq2SeqParams from autotrain.trainers.tabular.params import TabularParams from autotrain.trainers.text_classification.params import TextClassificationParams from autotrain.trainers.text_regression.params import TextRegressionParams from autotrain.trainers.token_classification.params import TokenClassificationParams from autotrain.trainers.vlm.params import VLMTrainingParams CPU_COMMAND = [ "accelerate", "launch", "--cpu", ] SINGLE_GPU_COMMAND = [ "accelerate", "launch", "--num_machines", "1", "--num_processes", "1", ] def get_accelerate_command(num_gpus, gradient_accumulation_steps=1, distributed_backend=None): """ Generates the appropriate command to launch a training job using the `accelerate` library based on the number of GPUs and the specified distributed backend. Args: num_gpus (int): The number of GPUs available for training. If 0, training will be forced on CPU. gradient_accumulation_steps (int, optional): The number of gradient accumulation steps. Defaults to 1. distributed_backend (str, optional): The distributed backend to use. Can be "ddp" (Distributed Data Parallel), "deepspeed", or None. Defaults to None. Returns: list or str: The command to be executed as a list of strings. If no GPU is found, returns a CPU command string. If a single GPU is found, returns a single GPU command string. Otherwise, returns a list of command arguments for multi-GPU or DeepSpeed training. Raises: ValueError: If an unsupported distributed backend is specified. """ if num_gpus == 0: logger.warning("No GPU found. Forcing training on CPU. This will be super slow!") return CPU_COMMAND if num_gpus == 1: return SINGLE_GPU_COMMAND if distributed_backend in ("ddp", None): return [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", str(num_gpus), ] elif distributed_backend == "deepspeed": return [ "accelerate", "launch", "--use_deepspeed", "--zero_stage", "3", "--offload_optimizer_device", "none", "--offload_param_device", "none", "--zero3_save_16bit_model", "true", "--zero3_init_flag", "true", "--deepspeed_multinode_launcher", "standard", "--gradient_accumulation_steps", str(gradient_accumulation_steps), ] else: raise ValueError("Unsupported distributed backend") def launch_command(params): """ Launches the appropriate training command based on the type of training parameters provided. Args: params (object): An instance of one of the training parameter classes. This can be one of the following: - LLMTrainingParams - GenericParams - TabularParams - TextClassificationParams - TextRegressionParams - SentenceTransformersParams - ExtractiveQuestionAnsweringParams - TokenClassificationParams - ImageClassificationParams - ObjectDetectionParams - ImageRegressionParams - Seq2SeqParams - VLMTrainingParams Returns: list: A list of command line arguments to be executed for training. Raises: ValueError: If the provided params type is unsupported. 
""" params.project_name = shlex.split(params.project_name)[0] cuda_available = torch.cuda.is_available() mps_available = torch.backends.mps.is_available() if cuda_available: num_gpus = torch.cuda.device_count() elif mps_available: num_gpus = 1 else: num_gpus = 0 if isinstance(params, LLMTrainingParams): cmd = get_accelerate_command(num_gpus, params.gradient_accumulation, params.distributed_backend) if num_gpus > 0: cmd.append("--mixed_precision") if params.mixed_precision == "fp16": cmd.append("fp16") elif params.mixed_precision == "bf16": cmd.append("bf16") else: cmd.append("no") cmd.extend( [ "-m", "autotrain.trainers.clm", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif isinstance(params, GenericParams): cmd = [ "python", "-m", "autotrain.trainers.generic", "--config", os.path.join(params.project_name, "training_params.json"), ] elif isinstance(params, TabularParams): cmd = [ "python", "-m", "autotrain.trainers.tabular", "--training_config", os.path.join(params.project_name, "training_params.json"), ] elif ( isinstance(params, TextClassificationParams) or isinstance(params, TextRegressionParams) or isinstance(params, SentenceTransformersParams) or isinstance(params, ExtractiveQuestionAnsweringParams) ): if num_gpus == 0: cmd = [ "accelerate", "launch", "--cpu", ] elif num_gpus == 1: cmd = [ "accelerate", "launch", "--num_machines", "1", "--num_processes", "1", ] else: cmd = [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", str(num_gpus), ] if num_gpus > 0: cmd.append("--mixed_precision") if params.mixed_precision == "fp16": cmd.append("fp16") elif params.mixed_precision == "bf16": cmd.append("bf16") else: cmd.append("no") if isinstance(params, TextRegressionParams): cmd.extend( [ "-m", "autotrain.trainers.text_regression", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif isinstance(params, SentenceTransformersParams): cmd.extend( [ "-m", "autotrain.trainers.sent_transformers", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif isinstance(params, ExtractiveQuestionAnsweringParams): cmd.extend( [ "-m", "autotrain.trainers.extractive_question_answering", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) else: cmd.extend( [ "-m", "autotrain.trainers.text_classification", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif isinstance(params, TokenClassificationParams): if num_gpus == 0: cmd = [ "accelerate", "launch", "--cpu", ] elif num_gpus == 1: cmd = [ "accelerate", "launch", "--num_machines", "1", "--num_processes", "1", ] else: cmd = [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", str(num_gpus), ] if num_gpus > 0: cmd.append("--mixed_precision") if params.mixed_precision == "fp16": cmd.append("fp16") elif params.mixed_precision == "bf16": cmd.append("bf16") else: cmd.append("no") cmd.extend( [ "-m", "autotrain.trainers.token_classification", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif ( isinstance(params, ImageClassificationParams) or isinstance(params, ObjectDetectionParams) or isinstance(params, ImageRegressionParams) ): if num_gpus == 0: cmd = [ "accelerate", "launch", "--cpu", ] elif num_gpus == 1: cmd = [ "accelerate", "launch", "--num_machines", "1", "--num_processes", "1", ] else: cmd = [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", 
str(num_gpus), ] if num_gpus > 0: cmd.append("--mixed_precision") if params.mixed_precision == "fp16": cmd.append("fp16") elif params.mixed_precision == "bf16": cmd.append("bf16") else: cmd.append("no") if isinstance(params, ObjectDetectionParams): cmd.extend( [ "-m", "autotrain.trainers.object_detection", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif isinstance(params, ImageRegressionParams): cmd.extend( [ "-m", "autotrain.trainers.image_regression", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) else: cmd.extend( [ "-m", "autotrain.trainers.image_classification", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif isinstance(params, Seq2SeqParams): if num_gpus == 0: logger.warning("No GPU found. Forcing training on CPU. This will be super slow!") cmd = [ "accelerate", "launch", "--cpu", ] elif num_gpus == 1: cmd = [ "accelerate", "launch", "--num_machines", "1", "--num_processes", "1", ] elif num_gpus == 2: cmd = [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", "2", ] else: if params.quantization in ("int8", "int4") and params.peft and params.mixed_precision == "bf16": cmd = [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", str(num_gpus), ] else: cmd = [ "accelerate", "launch", "--use_deepspeed", "--zero_stage", "3", "--offload_optimizer_device", "none", "--offload_param_device", "none", "--zero3_save_16bit_model", "true", "--zero3_init_flag", "true", "--deepspeed_multinode_launcher", "standard", "--gradient_accumulation_steps", str(params.gradient_accumulation), ] if num_gpus > 0: cmd.append("--mixed_precision") if params.mixed_precision == "fp16": cmd.append("fp16") elif params.mixed_precision == "bf16": cmd.append("bf16") else: cmd.append("no") cmd.extend( [ "-m", "autotrain.trainers.seq2seq", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) elif isinstance(params, VLMTrainingParams): if num_gpus == 0: logger.warning("No GPU found. Forcing training on CPU. This will be super slow!") cmd = [ "accelerate", "launch", "--cpu", ] elif num_gpus == 1: cmd = [ "accelerate", "launch", "--num_machines", "1", "--num_processes", "1", ] elif num_gpus == 2: cmd = [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", "2", ] else: if params.quantization in ("int8", "int4") and params.peft and params.mixed_precision == "bf16": cmd = [ "accelerate", "launch", "--multi_gpu", "--num_machines", "1", "--num_processes", str(num_gpus), ] else: cmd = [ "accelerate", "launch", "--use_deepspeed", "--zero_stage", "3", "--offload_optimizer_device", "none", "--offload_param_device", "none", "--zero3_save_16bit_model", "true", "--zero3_init_flag", "true", "--deepspeed_multinode_launcher", "standard", "--gradient_accumulation_steps", str(params.gradient_accumulation), ] if num_gpus > 0: cmd.append("--mixed_precision") if params.mixed_precision == "fp16": cmd.append("fp16") elif params.mixed_precision == "bf16": cmd.append("bf16") else: cmd.append("no") cmd.extend( [ "-m", "autotrain.trainers.vlm", "--training_config", os.path.join(params.project_name, "training_params.json"), ] ) else: raise ValueError("Unsupported params type") logger.info(cmd) logger.info(params) return cmd
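Of the two functions above, `get_accelerate_command` is the easiest to reason about in isolation: its output depends only on the GPU count, the gradient-accumulation steps, and the backend. A short sketch follows, with the expected return values read off the code rather than executed here:

```python
from autotrain.commands import get_accelerate_command

get_accelerate_command(0)
# ['accelerate', 'launch', '--cpu']  (also logs a "No GPU found" warning)

get_accelerate_command(1)
# ['accelerate', 'launch', '--num_machines', '1', '--num_processes', '1']

get_accelerate_command(4)  # DDP is the default backend
# ['accelerate', 'launch', '--multi_gpu', '--num_machines', '1', '--num_processes', '4']

get_accelerate_command(4, gradient_accumulation_steps=8, distributed_backend="deepspeed")
# ['accelerate', 'launch', '--use_deepspeed', '--zero_stage', '3', ...,
#  '--gradient_accumulation_steps', '8']
```

For most trainers, `launch_command` then layers the task-specific pieces on top of such a base command: a `--mixed_precision` flag whenever at least one GPU is available, followed by `-m autotrain.trainers.<task>` and the `--training_config` path inside the project directory (the generic and tabular trainers are launched with plain `python -m` instead).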
8
0
hf_public_repos/autotrain-advanced/src
hf_public_repos/autotrain-advanced/src/autotrain/params.py
from autotrain.trainers.clm.params import LLMTrainingParams from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams from autotrain.trainers.image_classification.params import ImageClassificationParams from autotrain.trainers.image_regression.params import ImageRegressionParams from autotrain.trainers.object_detection.params import ObjectDetectionParams from autotrain.trainers.sent_transformers.params import SentenceTransformersParams from autotrain.trainers.seq2seq.params import Seq2SeqParams from autotrain.trainers.tabular.params import TabularParams from autotrain.trainers.text_classification.params import TextClassificationParams from autotrain.trainers.text_regression.params import TextRegressionParams from autotrain.trainers.token_classification.params import TokenClassificationParams from autotrain.trainers.vlm.params import VLMTrainingParams
9
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/ddpg.rs
use std::collections::VecDeque; use candle::{DType, Device, Error, Module, Result, Tensor, Var}; use candle_nn::{ func, linear, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, Sequential, VarBuilder, VarMap, }; use rand::{distributions::Uniform, thread_rng, Rng}; use super::gym_env::GymEnv; pub struct OuNoise { mu: f64, theta: f64, sigma: f64, state: Tensor, } impl OuNoise { pub fn new(mu: f64, theta: f64, sigma: f64, size_action: usize) -> Result<Self> { Ok(Self { mu, theta, sigma, state: Tensor::ones(size_action, DType::F32, &Device::Cpu)?, }) } pub fn sample(&mut self) -> Result<Tensor> { let rand = Tensor::randn_like(&self.state, 0.0, 1.0)?; let dx = ((self.theta * (self.mu - &self.state)?)? + (self.sigma * rand)?)?; self.state = (&self.state + dx)?; Ok(self.state.clone()) } } #[derive(Clone)] struct Transition { state: Tensor, action: Tensor, reward: Tensor, next_state: Tensor, terminated: bool, truncated: bool, } impl Transition { fn new( state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) -> Self { Self { state: state.clone(), action: action.clone(), reward: reward.clone(), next_state: next_state.clone(), terminated, truncated, } } } pub struct ReplayBuffer { buffer: VecDeque<Transition>, capacity: usize, size: usize, } impl ReplayBuffer { pub fn new(capacity: usize) -> Self { Self { buffer: VecDeque::with_capacity(capacity), capacity, size: 0, } } pub fn push( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { if self.size == self.capacity { self.buffer.pop_front(); } else { self.size += 1; } self.buffer.push_back(Transition::new( state, action, reward, next_state, terminated, truncated, )); } #[allow(clippy::type_complexity)] pub fn random_batch( &self, batch_size: usize, ) -> Result<Option<(Tensor, Tensor, Tensor, Tensor, Vec<bool>, Vec<bool>)>> { if self.size < batch_size { Ok(None) } else { let transitions: Vec<&Transition> = thread_rng() .sample_iter(Uniform::from(0..self.size)) .take(batch_size) .map(|i| self.buffer.get(i).unwrap()) .collect(); let states: Vec<Tensor> = transitions .iter() .map(|t| t.state.unsqueeze(0)) .collect::<Result<_>>()?; let actions: Vec<Tensor> = transitions .iter() .map(|t| t.action.unsqueeze(0)) .collect::<Result<_>>()?; let rewards: Vec<Tensor> = transitions .iter() .map(|t| t.reward.unsqueeze(0)) .collect::<Result<_>>()?; let next_states: Vec<Tensor> = transitions .iter() .map(|t| t.next_state.unsqueeze(0)) .collect::<Result<_>>()?; let terminateds: Vec<bool> = transitions.iter().map(|t| t.terminated).collect(); let truncateds: Vec<bool> = transitions.iter().map(|t| t.truncated).collect(); Ok(Some(( Tensor::cat(&states, 0)?, Tensor::cat(&actions, 0)?, Tensor::cat(&rewards, 0)?, Tensor::cat(&next_states, 0)?, terminateds, truncateds, ))) } } } fn track( varmap: &mut VarMap, vb: &VarBuilder, target_prefix: &str, network_prefix: &str, dims: &[(usize, usize)], tau: f64, ) -> Result<()> { for (i, &(in_dim, out_dim)) in dims.iter().enumerate() { let target_w = vb.get((out_dim, in_dim), &format!("{target_prefix}-fc{i}.weight"))?; let network_w = vb.get((out_dim, in_dim), &format!("{network_prefix}-fc{i}.weight"))?; varmap.set_one( format!("{target_prefix}-fc{i}.weight"), ((tau * network_w)? 
+ ((1.0 - tau) * target_w)?)?, )?; let target_b = vb.get(out_dim, &format!("{target_prefix}-fc{i}.bias"))?; let network_b = vb.get(out_dim, &format!("{network_prefix}-fc{i}.bias"))?; varmap.set_one( format!("{target_prefix}-fc{i}.bias"), ((tau * network_b)? + ((1.0 - tau) * target_b)?)?, )?; } Ok(()) } #[allow(unused)] struct Actor<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Actor<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims = vec![(size_state, 400), (400, 300), (300, size_action)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) .add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?) .add(func(|xs| xs.tanh())); Ok::<Sequential, Error>(seq) }; let network = make_network("actor")?; let target_network = make_network("target-actor")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0)?; Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor) -> Result<Tensor> { self.network.forward(state) } fn target_forward(&self, state: &Tensor) -> Result<Tensor> { self.target_network.forward(state) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-actor", "actor", &self.dims, tau, ) } } #[allow(unused)] struct Critic<'a> { varmap: VarMap, vb: VarBuilder<'a>, network: Sequential, target_network: Sequential, size_state: usize, size_action: usize, dims: Vec<(usize, usize)>, } impl Critic<'_> { fn new(device: &Device, dtype: DType, size_state: usize, size_action: usize) -> Result<Self> { let mut varmap = VarMap::new(); let vb = VarBuilder::from_varmap(&varmap, dtype, device); let dims: Vec<(usize, usize)> = vec![(size_state + size_action, 400), (400, 300), (300, 1)]; let make_network = |prefix: &str| { let seq = seq() .add(linear( dims[0].0, dims[0].1, vb.pp(format!("{prefix}-fc0")), )?) .add(Activation::Relu) .add(linear( dims[1].0, dims[1].1, vb.pp(format!("{prefix}-fc1")), )?) 
.add(Activation::Relu) .add(linear( dims[2].0, dims[2].1, vb.pp(format!("{prefix}-fc2")), )?); Ok::<Sequential, Error>(seq) }; let network = make_network("critic")?; let target_network = make_network("target-critic")?; // this sets the two networks to be equal to each other using tau = 1.0 track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0)?; Ok(Self { varmap, vb, network, target_network, size_state, size_action, dims, }) } fn forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.network.forward(&xs) } fn target_forward(&self, state: &Tensor, action: &Tensor) -> Result<Tensor> { let xs = Tensor::cat(&[action, state], 1)?; self.target_network.forward(&xs) } fn track(&mut self, tau: f64) -> Result<()> { track( &mut self.varmap, &self.vb, "target-critic", "critic", &self.dims, tau, ) } } #[allow(unused)] #[allow(clippy::upper_case_acronyms)] pub struct DDPG<'a> { actor: Actor<'a>, actor_optim: AdamW, critic: Critic<'a>, critic_optim: AdamW, gamma: f64, tau: f64, replay_buffer: ReplayBuffer, ou_noise: OuNoise, size_state: usize, size_action: usize, pub train: bool, } impl DDPG<'_> { #[allow(clippy::too_many_arguments)] pub fn new( device: &Device, size_state: usize, size_action: usize, train: bool, actor_lr: f64, critic_lr: f64, gamma: f64, tau: f64, buffer_capacity: usize, ou_noise: OuNoise, ) -> Result<Self> { let filter_by_prefix = |varmap: &VarMap, prefix: &str| { varmap .data() .lock() .unwrap() .iter() .filter_map(|(name, var)| name.starts_with(prefix).then_some(var.clone())) .collect::<Vec<Var>>() }; let actor = Actor::new(device, DType::F32, size_state, size_action)?; let actor_optim = AdamW::new( filter_by_prefix(&actor.varmap, "actor"), ParamsAdamW { lr: actor_lr, ..Default::default() }, )?; let critic = Critic::new(device, DType::F32, size_state, size_action)?; let critic_optim = AdamW::new( filter_by_prefix(&critic.varmap, "critic"), ParamsAdamW { lr: critic_lr, ..Default::default() }, )?; Ok(Self { actor, actor_optim, critic, critic_optim, gamma, tau, replay_buffer: ReplayBuffer::new(buffer_capacity), ou_noise, size_state, size_action, train, }) } pub fn remember( &mut self, state: &Tensor, action: &Tensor, reward: &Tensor, next_state: &Tensor, terminated: bool, truncated: bool, ) { self.replay_buffer .push(state, action, reward, next_state, terminated, truncated) } pub fn actions(&mut self, state: &Tensor) -> Result<f32> { let actions = self .actor .forward(&state.detach().unsqueeze(0)?)? .squeeze(0)?; let actions = if self.train { (actions + self.ou_noise.sample()?)? } else { actions }; actions.squeeze(0)?.to_scalar::<f32>() } pub fn train(&mut self, batch_size: usize) -> Result<()> { let (states, actions, rewards, next_states, _, _) = match self.replay_buffer.random_batch(batch_size)? { Some(v) => v, _ => return Ok(()), }; let q_target = self .critic .target_forward(&next_states, &self.actor.target_forward(&next_states)?)?; let q_target = (rewards + (self.gamma * q_target)?.detach())?; let q = self.critic.forward(&states, &actions)?; let diff = (q_target - q)?; let critic_loss = diff.sqr()?.mean_all()?; self.critic_optim.backward_step(&critic_loss)?; let actor_loss = self .critic .forward(&states, &self.actor.forward(&states)?)? .mean_all()? .neg()?; self.actor_optim.backward_step(&actor_loss)?; self.critic.track(self.tau)?; self.actor.track(self.tau)?; Ok(()) } } // The impact of the q value of the next state on the current state's q value. 
const GAMMA: f64 = 0.99; // The weight for updating the target networks. const TAU: f64 = 0.005; // The capacity of the replay buffer used for sampling training data. const REPLAY_BUFFER_CAPACITY: usize = 100_000; // The training batch size for each training iteration. const TRAINING_BATCH_SIZE: usize = 100; // The total number of episodes. const MAX_EPISODES: usize = 100; // The maximum length of an episode. const EPISODE_LENGTH: usize = 200; // The number of training iterations after one episode finishes. const TRAINING_ITERATIONS: usize = 200; // Ornstein-Uhlenbeck process parameters. const MU: f64 = 0.0; const THETA: f64 = 0.15; const SIGMA: f64 = 0.1; const ACTOR_LEARNING_RATE: f64 = 1e-4; const CRITIC_LEARNING_RATE: f64 = 1e-3; pub fn run() -> Result<()> { let env = GymEnv::new("Pendulum-v1")?; println!("action space: {}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let size_state = env.observation_space().iter().product::<usize>(); let size_action = env.action_space(); let mut agent = DDPG::new( &Device::Cpu, size_state, size_action, true, ACTOR_LEARNING_RATE, CRITIC_LEARNING_RATE, GAMMA, TAU, REPLAY_BUFFER_CAPACITY, OuNoise::new(MU, THETA, SIGMA, size_action)?, )?; let mut rng = rand::thread_rng(); for episode in 0..MAX_EPISODES { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; agent.remember( &state, &Tensor::new(vec![action], &Device::Cpu)?, &Tensor::new(vec![step.reward as f32], &Device::Cpu)?, &step.state, step.terminated, step.truncated, ); if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); for _ in 0..TRAINING_ITERATIONS { agent.train(TRAINING_BATCH_SIZE)?; } } println!("Testing..."); agent.train = false; for episode in 0..10 { // let mut state = env.reset(episode as u64)?; let mut state = env.reset(rng.gen::<u64>())?; let mut total_reward = 0.0; for _ in 0..EPISODE_LENGTH { let mut action = 2.0 * agent.actions(&state)?; action = action.clamp(-2.0, 2.0); let step = env.step(vec![action])?; total_reward += step.reward; if step.terminated || step.truncated { break; } state = step.state; } println!("episode {episode} with total reward of {total_reward}"); } Ok(()) }
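The `train` and `track` methods above are the standard DDPG updates. Writing $\mu/Q$ for the actor/critic and $\mu'/Q'$ for their target copies, the code computes

$$
\begin{aligned}
y &= r + \gamma\, Q'\big(s',\, \mu'(s')\big) \qquad \text{(detached target)}\\
\mathcal{L}_{\text{critic}} &= \operatorname{mean}\!\big[(y - Q(s,a))^2\big], \qquad
\mathcal{L}_{\text{actor}} = -\operatorname{mean}\!\big[Q\big(s,\, \mu(s)\big)\big],\\
\theta' &\leftarrow \tau\,\theta + (1-\tau)\,\theta' \qquad \text{(soft update in \texttt{track})},
\end{aligned}
$$

while the exploration noise follows the discretised Ornstein-Uhlenbeck recursion $x \leftarrow x + \theta_{\mathrm{OU}}(\mu_{\mathrm{OU}} - x) + \sigma\,\varepsilon$ with $\varepsilon \sim \mathcal{N}(0, I)$. Note that `train` discards the sampled `terminated`/`truncated` flags, so no terminal masking is applied to the target $y$.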
0
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::Result; use clap::{Parser, Subcommand}; mod gym_env; mod vec_gym_env; mod ddpg; mod dqn; mod policy_gradient; #[derive(Parser)] struct Args { #[command(subcommand)] command: Command, } #[derive(Subcommand)] enum Command { Pg, Ddpg, Dqn, } fn main() -> Result<()> { let args = Args::parse(); match args.command { Command::Pg => policy_gradient::run()?, Command::Ddpg => ddpg::run()?, Command::Dqn => dqn::run()?, } Ok(()) }
1
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/gym_env.rs
//! Wrappers around the Python API of Gymnasium (the new version of OpenAI gym) use candle::{Device, Result, Tensor}; use pyo3::prelude::*; use pyo3::types::PyDict; /// The return value for a step. #[derive(Debug)] pub struct Step<A> { pub state: Tensor, pub action: A, pub reward: f64, pub terminated: bool, pub truncated: bool, } impl<A: Copy> Step<A> { /// Returns a copy of this step changing the observation tensor. pub fn copy_with_obs(&self, state: &Tensor) -> Step<A> { Step { state: state.clone(), action: self.action, reward: self.reward, terminated: self.terminated, truncated: self.truncated, } } } /// An OpenAI Gym session. pub struct GymEnv { env: PyObject, action_space: usize, observation_space: Vec<usize>, } fn w(res: PyErr) -> candle::Error { candle::Error::wrap(res) } impl GymEnv { /// Creates a new session of the specified OpenAI Gym environment. pub fn new(name: &str) -> Result<GymEnv> { Python::with_gil(|py| { let gym = py.import_bound("gymnasium")?; let make = gym.getattr("make")?; let env = make.call1((name,))?; let action_space = env.getattr("action_space")?; let action_space = if let Ok(val) = action_space.getattr("n") { val.extract()? } else { let action_space: Vec<usize> = action_space.getattr("shape")?.extract()?; action_space[0] }; let observation_space = env.getattr("observation_space")?; let observation_space = observation_space.getattr("shape")?.extract()?; Ok(GymEnv { env: env.into(), action_space, observation_space, }) }) .map_err(w) } /// Resets the environment, returning the observation tensor. pub fn reset(&self, seed: u64) -> Result<Tensor> { let state: Vec<f32> = Python::with_gil(|py| { let kwargs = PyDict::new_bound(py); kwargs.set_item("seed", seed)?; let state = self.env.call_method_bound(py, "reset", (), Some(&kwargs))?; state.bind(py).get_item(0)?.extract() }) .map_err(w)?; Tensor::new(state, &Device::Cpu) } /// Applies an environment step using the specified action. pub fn step<A: pyo3::IntoPy<pyo3::Py<pyo3::PyAny>> + Clone>( &self, action: A, ) -> Result<Step<A>> { let (state, reward, terminated, truncated) = Python::with_gil(|py| { let step = self .env .call_method_bound(py, "step", (action.clone(),), None)?; let step = step.bind(py); let state: Vec<f32> = step.get_item(0)?.extract()?; let reward: f64 = step.get_item(1)?.extract()?; let terminated: bool = step.get_item(2)?.extract()?; let truncated: bool = step.get_item(3)?.extract()?; Ok((state, reward, terminated, truncated)) }) .map_err(w)?; let state = Tensor::new(state, &Device::Cpu)?; Ok(Step { state, action, reward, terminated, truncated, }) } /// Returns the number of allowed actions for this environment. pub fn action_space(&self) -> usize { self.action_space } /// Returns the shape of the observation tensors. pub fn observation_space(&self) -> &[usize] { &self.observation_space } }
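Because `GymEnv` is a thin pyo3 wrapper, it is easiest to read next to the Python calls it mirrors. Here is a minimal sketch of the underlying `gymnasium` usage (standard gymnasium API, shown with the Pendulum environment used by the DDPG example):

```python
import gymnasium as gym

env = gym.make("Pendulum-v1")
obs, info = env.reset(seed=42)   # GymEnv::reset keeps only item 0, the observation
obs, reward, terminated, truncated, info = env.step([0.0])  # GymEnv::step reads items 0..3
n_actions = env.action_space.shape[0]     # Box space -> no .n attribute, fall back to shape[0]
obs_shape = env.observation_space.shape   # e.g. (3,) for Pendulum-v1
env.close()
```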
2
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/vec_gym_env.rs
//! Vectorized version of the gym environment. use candle::{DType, Device, Result, Tensor}; use pyo3::prelude::*; #[allow(unused)] #[derive(Debug)] pub struct Step { pub obs: Tensor, pub reward: Tensor, pub is_done: Tensor, } #[allow(unused)] pub struct VecGymEnv { env: PyObject, action_space: usize, observation_space: Vec<usize>, } fn w(res: PyErr) -> candle::Error { candle::Error::wrap(res) } #[allow(unused)] impl VecGymEnv { pub fn new(name: &str, img_dir: Option<&str>, nprocesses: usize) -> Result<VecGymEnv> { Python::with_gil(|py| { let sys = py.import_bound("sys")?; let path = sys.getattr("path")?; let _ = path.call_method1( "append", ("candle-examples/examples/reinforcement-learning",), )?; let gym = py.import_bound("atari_wrappers")?; let make = gym.getattr("make")?; let env = make.call1((name, img_dir, nprocesses))?; let action_space = env.getattr("action_space")?; let action_space = action_space.getattr("n")?.extract()?; let observation_space = env.getattr("observation_space")?; let observation_space: Vec<usize> = observation_space.getattr("shape")?.extract()?; let observation_space = [vec![nprocesses].as_slice(), observation_space.as_slice()].concat(); Ok(VecGymEnv { env: env.into(), action_space, observation_space, }) }) .map_err(w) } pub fn reset(&self) -> Result<Tensor> { let obs = Python::with_gil(|py| { let obs = self.env.call_method0(py, "reset")?; let obs = obs.call_method0(py, "flatten")?; obs.extract::<Vec<f32>>(py) }) .map_err(w)?; Tensor::new(obs, &Device::Cpu)?.reshape(self.observation_space.as_slice()) } pub fn step(&self, action: Vec<usize>) -> Result<Step> { let (obs, reward, is_done) = Python::with_gil(|py| { let step = self.env.call_method_bound(py, "step", (action,), None)?; let step = step.bind(py); let obs = step.get_item(0)?.call_method("flatten", (), None)?; let obs_buffer = pyo3::buffer::PyBuffer::get_bound(&obs)?; let obs: Vec<u8> = obs_buffer.to_vec(py)?; let reward: Vec<f32> = step.get_item(1)?.extract()?; let is_done: Vec<f32> = step.get_item(2)?.extract()?; Ok((obs, reward, is_done)) }) .map_err(w)?; let obs = Tensor::from_vec(obs, self.observation_space.as_slice(), &Device::Cpu)? .to_dtype(DType::F32)?; let reward = Tensor::new(reward, &Device::Cpu)?; let is_done = Tensor::new(is_done, &Device::Cpu)?; Ok(Step { obs, reward, is_done, }) } pub fn action_space(&self) -> usize { self.action_space } pub fn observation_space(&self) -> &[usize] { &self.observation_space } }
3
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/README.md
# candle-reinforcement-learning Reinforcement Learning examples for candle. This has been tested with `gymnasium` version `0.29.1`. You can install the Python package with: ```bash pip install "gymnasium[accept-rom-license]" ``` In order to run the examples, use the following commands. Note the additional `--package` flag to ensure that there is no conflict with the `candle-pyo3` crate. For the Policy Gradient example: ```bash cargo run --example reinforcement-learning --features=pyo3 --package candle-examples -- pg ``` For the Deep Deterministic Policy Gradient example: ```bash cargo run --example reinforcement-learning --features=pyo3 --package candle-examples -- ddpg ```
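Note that `main.rs` in this example also registers a `dqn` subcommand, so the Deep Q-Network example can presumably be launched the same way by replacing `ddpg` with `dqn` in the command above.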
4
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/policy_gradient.rs
use super::gym_env::{GymEnv, Step}; use candle::{DType, Device, Error, Module, Result, Tensor}; use candle_nn::{ linear, ops::log_softmax, ops::softmax, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, VarBuilder, VarMap, }; use rand::{distributions::Distribution, rngs::ThreadRng, Rng}; fn new_model( input_shape: &[usize], num_actions: usize, dtype: DType, device: &Device, ) -> Result<(impl Module, VarMap)> { let input_size = input_shape.iter().product(); let varmap = VarMap::new(); let var_builder = VarBuilder::from_varmap(&varmap, dtype, device); let model = seq() .add(linear(input_size, 32, var_builder.pp("lin1"))?) .add(Activation::Relu) .add(linear(32, num_actions, var_builder.pp("lin2"))?); Ok((model, varmap)) } fn accumulate_rewards(steps: &[Step<i64>]) -> Vec<f64> { let mut rewards: Vec<f64> = steps.iter().map(|s| s.reward).collect(); let mut acc_reward = 0f64; for (i, reward) in rewards.iter_mut().enumerate().rev() { if steps[i].terminated { acc_reward = 0.0; } acc_reward += *reward; *reward = acc_reward; } rewards } fn weighted_sample(probs: Vec<f32>, rng: &mut ThreadRng) -> Result<usize> { let distribution = rand::distributions::WeightedIndex::new(probs).map_err(Error::wrap)?; let mut rng = rng; Ok(distribution.sample(&mut rng)) } pub fn run() -> Result<()> { let env = GymEnv::new("CartPole-v1")?; println!("action space: {:?}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let (model, varmap) = new_model( env.observation_space(), env.action_space(), DType::F32, &Device::Cpu, )?; let optimizer_params = ParamsAdamW { lr: 0.01, weight_decay: 0.01, ..Default::default() }; let mut optimizer = AdamW::new(varmap.all_vars(), optimizer_params)?; let mut rng = rand::thread_rng(); for epoch_idx in 0..100 { let mut state = env.reset(rng.gen::<u64>())?; let mut steps: Vec<Step<i64>> = vec![]; loop { let action = { let action_probs: Vec<f32> = softmax(&model.forward(&state.detach().unsqueeze(0)?)?, 1)? .squeeze(0)? .to_vec1()?; weighted_sample(action_probs, &mut rng)? as i64 }; let step = env.step(action)?; steps.push(step.copy_with_obs(&state)); if step.terminated || step.truncated { state = env.reset(rng.gen::<u64>())?; if steps.len() > 5000 { break; } } else { state = step.state; } } let total_reward: f64 = steps.iter().map(|s| s.reward).sum(); let episodes: i64 = steps .iter() .map(|s| (s.terminated || s.truncated) as i64) .sum(); println!( "epoch: {:<3} episodes: {:<5} avg reward per episode: {:.2}", epoch_idx, episodes, total_reward / episodes as f64 ); let batch_size = steps.len(); let rewards = Tensor::from_vec(accumulate_rewards(&steps), batch_size, &Device::Cpu)? .to_dtype(DType::F32)? .detach(); let actions_mask = { let actions: Vec<i64> = steps.iter().map(|s| s.action).collect(); let actions_mask: Vec<Tensor> = actions .iter() .map(|&action| { // One-hot encoding let mut action_mask = vec![0.0; env.action_space()]; action_mask[action as usize] = 1.0; Tensor::from_vec(action_mask, env.action_space(), &Device::Cpu) .unwrap() .to_dtype(DType::F32) .unwrap() }) .collect(); Tensor::stack(&actions_mask, 0)?.detach() }; let states = { let states: Vec<Tensor> = steps.into_iter().map(|s| s.state).collect(); Tensor::stack(&states, 0)?.detach() }; let log_probs = actions_mask .mul(&log_softmax(&model.forward(&states)?, 1)?)? .sum(1)?; let loss = rewards.mul(&log_probs)?.neg()?.mean_all()?; optimizer.backward_step(&loss)?; } Ok(()) }
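The loss built at the end of each epoch is the classic REINFORCE objective. With $R_t$ the reward-to-go produced by `accumulate_rewards` (summed backwards over the collected steps and reset whenever a step is `terminated`), it is

$$
\mathcal{L} = -\frac{1}{N} \sum_{t=1}^{N} R_t \,\log \pi_\theta(a_t \mid s_t),
$$

where $N$ is the total number of steps gathered in the epoch and $\log \pi_\theta(a_t \mid s_t)$ is selected from the `log_softmax` output via the one-hot `actions_mask`.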
5
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/atari_wrappers.py
import gymnasium as gym import numpy as np from collections import deque from PIL import Image from multiprocessing import Process, Pipe # atari_wrappers.py class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): """Sample initial states by taking random number of no-ops on reset. No-op is assumed to be action 0. """ gym.Wrapper.__init__(self, env) self.noop_max = noop_max self.override_num_noops = None assert env.unwrapped.get_action_meanings()[0] == 'NOOP' def reset(self): """ Do no-op action for a number of steps in [1, noop_max].""" self.env.reset() if self.override_num_noops is not None: noops = self.override_num_noops else: noops = self.unwrapped.np_random.integers(1, self.noop_max + 1) #pylint: disable=E1101 assert noops > 0 obs = None for _ in range(noops): obs, _, done, _ = self.env.step(0) if done: obs = self.env.reset() return obs class FireResetEnv(gym.Wrapper): def __init__(self, env): """Take action on reset for environments that are fixed until firing.""" gym.Wrapper.__init__(self, env) assert env.unwrapped.get_action_meanings()[1] == 'FIRE' assert len(env.unwrapped.get_action_meanings()) >= 3 def reset(self): self.env.reset() obs, _, done, _ = self.env.step(1) if done: self.env.reset() obs, _, done, _ = self.env.step(2) if done: self.env.reset() return obs class ImageSaver(gym.Wrapper): def __init__(self, env, img_path, rank): gym.Wrapper.__init__(self, env) self._cnt = 0 self._img_path = img_path self._rank = rank def step(self, action): step_result = self.env.step(action) obs, _, _, _ = step_result img = Image.fromarray(obs, 'RGB') img.save('%s/out%d-%05d.png' % (self._img_path, self._rank, self._cnt)) self._cnt += 1 return step_result class EpisodicLifeEnv(gym.Wrapper): def __init__(self, env): """Make end-of-life == end-of-episode, but only reset on true game over. Done by DeepMind for the DQN and co. since it helps value estimation. """ gym.Wrapper.__init__(self, env) self.lives = 0 self.was_real_done = True def step(self, action): obs, reward, done, info = self.env.step(action) self.was_real_done = done # check current lives, make loss of life terminal, # then update lives to handle bonus lives lives = self.env.unwrapped.ale.lives() if lives < self.lives and lives > 0: # for Qbert sometimes we stay in lives == 0 condition for a few frames # so its important to keep lives > 0, so that we only reset once # the environment advertises done. done = True self.lives = lives return obs, reward, done, info def reset(self): """Reset only when lives are exhausted. This way all states are still reachable even though lives are episodic, and the learner need not know about any of this behind-the-scenes. """ if self.was_real_done: obs = self.env.reset() else: # no-op step to advance from terminal/lost life state obs, _, _, _ = self.env.step(0) self.lives = self.env.unwrapped.ale.lives() return obs class MaxAndSkipEnv(gym.Wrapper): def __init__(self, env, skip=4): """Return only every `skip`-th frame""" gym.Wrapper.__init__(self, env) # most recent raw observations (for max pooling across time steps) self._obs_buffer = deque(maxlen=2) self._skip = skip def step(self, action): """Repeat action, sum reward, and max over last observations.""" total_reward = 0.0 done = None for _ in range(self._skip): obs, reward, done, info = self.env.step(action) self._obs_buffer.append(obs) total_reward += reward if done: break max_frame = np.max(np.stack(self._obs_buffer), axis=0) return max_frame, total_reward, done, info def reset(self): """Clear past frame buffer and init. 
to first obs. from inner env.""" self._obs_buffer.clear() obs = self.env.reset() self._obs_buffer.append(obs) return obs class ClipRewardEnv(gym.RewardWrapper): def reward(self, reward): """Bin reward to {+1, 0, -1} by its sign.""" return np.sign(reward) class WarpFrame(gym.ObservationWrapper): def __init__(self, env): """Warp frames to 84x84 as done in the Nature paper and later work.""" gym.ObservationWrapper.__init__(self, env) self.res = 84 self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.res, self.res, 1), dtype='uint8') def observation(self, obs): frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32')) frame = np.array(Image.fromarray(frame).resize((self.res, self.res), resample=Image.BILINEAR), dtype=np.uint8) return frame.reshape((self.res, self.res, 1)) class FrameStack(gym.Wrapper): def __init__(self, env, k): """Buffer observations and stack across channels (last axis).""" gym.Wrapper.__init__(self, env) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape assert shp[2] == 1 # can only stack 1-channel frames self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k), dtype='uint8') def reset(self): """Clear buffer and re-fill by duplicating the first observation.""" ob = self.env.reset() for _ in range(self.k): self.frames.append(ob) return self.observation() def step(self, action): ob, reward, done, info = self.env.step(action) self.frames.append(ob) return self.observation(), reward, done, info def observation(self): assert len(self.frames) == self.k return np.concatenate(self.frames, axis=2) def wrap_deepmind(env, episode_life=True, clip_rewards=True): """Configure environment for DeepMind-style Atari. Note: this does not include frame stacking!""" assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip if episode_life: env = EpisodicLifeEnv(env) env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if clip_rewards: env = ClipRewardEnv(env) return env # envs.py def make_env(env_id, img_dir, seed, rank): def _thunk(): env = gym.make(env_id) env.reset(seed=(seed + rank)) if img_dir is not None: env = ImageSaver(env, img_dir, rank) env = wrap_deepmind(env) env = WrapPyTorch(env) return env return _thunk class WrapPyTorch(gym.ObservationWrapper): def __init__(self, env=None): super(WrapPyTorch, self).__init__(env) self.observation_space = gym.spaces.Box(0.0, 1.0, [1, 84, 84], dtype='float32') def observation(self, observation): return observation.transpose(2, 0, 1) # vecenv.py class VecEnv(object): """ Vectorized environment base class """ def step(self, vac): """ Apply sequence of actions to sequence of environments actions -> (observations, rewards, news) where 'news' is a boolean vector indicating whether each element is new. 
""" raise NotImplementedError def reset(self): """ Reset all environments """ raise NotImplementedError def close(self): pass # subproc_vec_env.py def worker(remote, env_fn_wrapper): env = env_fn_wrapper.x() while True: cmd, data = remote.recv() if cmd == 'step': ob, reward, done, info = env.step(data) if done: ob = env.reset() remote.send((ob, reward, done, info)) elif cmd == 'reset': ob = env.reset() remote.send(ob) elif cmd == 'close': remote.close() break elif cmd == 'get_spaces': remote.send((env.action_space, env.observation_space)) else: raise NotImplementedError class CloudpickleWrapper(object): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) class SubprocVecEnv(VecEnv): def __init__(self, env_fns): """ envs: list of gym environments to run in subprocesses """ nenvs = len(env_fns) self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn))) for (work_remote, env_fn) in zip(self.work_remotes, env_fns)] for p in self.ps: p.start() self.remotes[0].send(('get_spaces', None)) self.action_space, self.observation_space = self.remotes[0].recv() def step(self, actions): for remote, action in zip(self.remotes, actions): remote.send(('step', action)) results = [remote.recv() for remote in self.remotes] obs, rews, dones, infos = zip(*results) return np.stack(obs), np.stack(rews), np.stack(dones), infos def reset(self): for remote in self.remotes: remote.send(('reset', None)) return np.stack([remote.recv() for remote in self.remotes]) def close(self): for remote in self.remotes: remote.send(('close', None)) for p in self.ps: p.join() @property def num_envs(self): return len(self.remotes) # Create the environment. def make(env_name, img_dir, num_processes): envs = SubprocVecEnv([ make_env(env_name, img_dir, 1337, i) for i in range(num_processes) ]) return envs
6
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/reinforcement-learning/dqn.rs
use std::collections::VecDeque; use rand::distributions::Uniform; use rand::{thread_rng, Rng}; use candle::{DType, Device, Module, Result, Tensor}; use candle_nn::loss::mse; use candle_nn::{linear, seq, Activation, AdamW, Optimizer, VarBuilder, VarMap}; use crate::gym_env::GymEnv; const DEVICE: Device = Device::Cpu; const EPISODES: usize = 200; const BATCH_SIZE: usize = 64; const GAMMA: f64 = 0.99; const LEARNING_RATE: f64 = 0.01; pub fn run() -> Result<()> { let env = GymEnv::new("CartPole-v1")?; // Build the model that predicts the estimated rewards given a specific state. let var_map = VarMap::new(); let vb = VarBuilder::from_varmap(&var_map, DType::F32, &DEVICE); let observation_space = *env.observation_space().first().unwrap(); let model = seq() .add(linear(observation_space, 64, vb.pp("linear_in"))?) .add(Activation::Relu) .add(linear(64, env.action_space(), vb.pp("linear_out"))?); let mut optimizer = AdamW::new_lr(var_map.all_vars(), LEARNING_RATE)?; // Initialize the model's memory. let mut memory = VecDeque::with_capacity(10000); // Start the training loop. let mut state = env.reset(0)?; let mut episode = 0; let mut accumulate_rewards = 0.0; while episode < EPISODES { // Given the current state, predict the estimated rewards, and take the // action that is expected to return the most rewards. let estimated_rewards = model.forward(&state.unsqueeze(0)?)?; let action: u32 = estimated_rewards.squeeze(0)?.argmax(0)?.to_scalar()?; // Take that action in the environment, and memorize the outcome: // - the state for which the action was taken // - the action taken // - the new state resulting of taking that action // - the actual rewards of taking that action // - whether the environment reached a terminal state or not (e.g. game over) let step = env.step(action)?; accumulate_rewards += step.reward; memory.push_back(( state, action, step.state.clone(), step.reward, step.terminated || step.truncated, )); state = step.state; // If there's enough entries in the memory, perform a learning step, where // BATCH_SIZE transitions will be sampled from the memory and will be // fed to the model so that it performs a backward pass. if memory.len() > BATCH_SIZE { // Sample randomly from the memory. let batch = thread_rng() .sample_iter(Uniform::from(0..memory.len())) .take(BATCH_SIZE) .map(|i| memory.get(i).unwrap().clone()) .collect::<Vec<_>>(); // Group all the samples together into tensors with the appropriate shape. let states: Vec<_> = batch.iter().map(|e| e.0.clone()).collect(); let states = Tensor::stack(&states, 0)?; let actions = batch.iter().map(|e| e.1); let actions = Tensor::from_iter(actions, &DEVICE)?.unsqueeze(1)?; let next_states: Vec<_> = batch.iter().map(|e| e.2.clone()).collect(); let next_states = Tensor::stack(&next_states, 0)?; let rewards = batch.iter().map(|e| e.3 as f32); let rewards = Tensor::from_iter(rewards, &DEVICE)?.unsqueeze(1)?; let non_final_mask = batch.iter().map(|e| !e.4 as u8 as f32); let non_final_mask = Tensor::from_iter(non_final_mask, &DEVICE)?.unsqueeze(1)?; // Get the estimated rewards for the actions that where taken at each step. let estimated_rewards = model.forward(&states)?; let x = estimated_rewards.gather(&actions, 1)?; // Get the maximum expected rewards for the next state, apply them a discount rate // GAMMA and add them to the rewards that were actually gathered on the current state. // If the next state is a terminal state, just omit maximum estimated // rewards for that state. 
let expected_rewards = model.forward(&next_states)?.detach(); let y = expected_rewards.max_keepdim(1)?; let y = (y * GAMMA * non_final_mask + rewards)?; // Compare the estimated rewards with the maximum expected rewards and // perform the backward step. let loss = mse(&x, &y)?; optimizer.backward_step(&loss)?; } // If we are on a terminal state, reset the environment and log how it went. if step.terminated || step.truncated { episode += 1; println!("Episode {episode} | Rewards {}", accumulate_rewards as i64); state = env.reset(0)?; accumulate_rewards = 0.0; } } Ok(()) }
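The learning step above is one-step Q-learning with a single network (no separate target network). For a sampled transition $(s, a, r, s', d)$ it computes

$$
y = r + \gamma\,(1 - d)\,\max_{a'} Q_\theta(s', a'), \qquad
\mathcal{L} = \operatorname{MSE}\big(Q_\theta(s, a),\, y\big),
$$

where $d \in \{0, 1\}$ is the terminal flag (the code's `non_final_mask` is $1 - d$) and the target branch is `detach()`-ed, so gradients flow only through $Q_\theta(s, a)$.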
7
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/starcoder2/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::starcoder2::Model; use candle::{DType, Device, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<|endoftext|>") { Some(token) => token, None => anyhow::bail!("cannot find the <|endoftext|> token"), }; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input, start_pos)?; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] use_flash_attn: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). 
#[arg(long, short = 'n', default_value_t = 10000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "main")] revision: String, #[arg(long)] config_file: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id, None => "bigcode/starcoder2-3b".to_string(), }; let repo = api.repo(Repo::with_revision( model_id, RepoType::Model, args.revision, )); let config_file = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let tokenizer_file = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => vec![repo.get("model.safetensors")?], }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_file).map_err(E::msg)?; let start = std::time::Instant::now(); let config = serde_json::from_reader(std::fs::File::open(config_file)?)?; let device = candle_examples::device(args.cpu)?; let dtype = if device.is_cuda() { DType::BF16 } else { DType::F32 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let model = Model::new(&config, vb)?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
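The one non-obvious transform in the sampling loop is the repetition penalty. `apply_repeat_penalty` is applied to the logits of tokens seen in the last `repeat_last_n` positions; stated here from the common CTRL-style definition rather than from the candle source, the rescaling is

$$
z_i' =
\begin{cases}
z_i / p, & z_i > 0,\\
z_i \cdot p, & z_i \le 0,
\end{cases}
\qquad p = \texttt{repeat\_penalty},
$$

so with the default $p = 1.1$ recently generated tokens become slightly less likely, while $p = 1$ leaves the logits untouched (the code skips the call entirely in that case).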
8
0
hf_public_repos/candle/candle-examples/examples
hf_public_repos/candle/candle-examples/examples/replit-code/main.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::mpt::{Config, Model as M}; use candle_transformers::models::quantized_mpt::Model as Q; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; enum Model { M(M), Q(Q), } impl Model { fn forward(&mut self, xs: &Tensor) -> candle::Result<Tensor> { match self { Self::M(model) => model.forward(xs), Self::Q(model) => model.forward(xs), } } } struct TextGeneration { model: Model, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, verbose_prompt: bool, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer, logits_processor, repeat_penalty, repeat_last_n, verbose_prompt, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; println!("starting the inference loop"); let tokens = self.tokenizer.encode(prompt, true).map_err(E::msg)?; if tokens.is_empty() { anyhow::bail!("Empty prompts are not supported in the phi model.") } if self.verbose_prompt { for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) { let token = token.replace('▁', " ").replace("<0x0A>", "\n"); println!("{id:7} -> '{token}'"); } } let mut tokens = tokens.get_ids().to_vec(); let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_vocab(true).get("<|endoftext|>") { Some(token) => *token, None => anyhow::bail!("cannot find the endoftext token"), }; print!("{prompt}"); std::io::stdout().flush()?; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } let token = self.tokenizer.decode(&[next_token], true).map_err(E::msg)?; print!("{token}"); std::io::stdout().flush()?; } let dt = start_gen.elapsed(); println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. 
#[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 1000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] quantized: bool, #[arg(long)] weight_file: Option<String>, #[arg(long)] tokenizer: Option<String>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id.to_string(), None => "lmz/candle-replit-code".to_string(), }; let revision = match args.revision { Some(rev) => rev.to_string(), None => "main".to_string(), }; let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let tokenizer_filename = match args.tokenizer { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let filename = match args.weight_file { Some(weight_file) => std::path::PathBuf::from(weight_file), None => { if args.quantized { repo.get("model-replit-code-v1_5-q4k.gguf")? } else { repo.get("model.safetensors")? } } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let device = candle_examples::device(args.cpu)?; let config = Config::replit_code_v1_5_3b(); let model = if args.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(&filename, &device)?; Model::Q(Q::new(&config, vb.pp("transformer"))?) } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[filename], DType::F32, &device)? }; Model::M(M::new(&config, vb.pp("transformer"))?) }; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, args.verbose_prompt, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
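As in the previous example, token selection is delegated to `LogitsProcessor::new(seed, temp, top_p)`. Stated from the usual definitions (not from the candle source), the two knobs amount to temperature scaling followed by nucleus filtering:

$$
p_i = \frac{\exp(z_i / T)}{\sum_j \exp(z_j / T)}, \qquad
\text{sample from the smallest set } S \text{ such that } \sum_{i \in S} p_i \ge \text{top-}p,
$$

with greedy argmax decoding typically used when no temperature is supplied.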
9