index | repo_id | file_path | content | __index_level_0__ |
---|---|---|---|---|
0 | hf_public_repos/api-inference-community/docker_images/k2 | hf_public_repos/api-inference-community/docker_images/k2/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase


class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)


class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/k2 | hf_public_repos/api-inference-community/docker_images/k2/tests/test_api_automatic_speech_recognition.py | import json
import os
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS


@skipIf(
    "automatic-speech-recognition" not in ALLOWED_TASKS,
    "automatic-speech-recognition not implemented",
)
@parameterized_class(
    [
        {"model_id": model_id}
        for model_id in TESTABLE_MODELS["automatic-speech-recognition"]
    ]
)
class AutomaticSpeecRecognitionTestCase(TestCase):
    def setUp(self):
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "automatic-speech-recognition"

        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("sample1.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        bpayload = self.read("sample1_dual.ogg")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})

    def test_webm_audiofile(self):
        bpayload = self.read("sample1.webm")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
| 1 |
0 | hf_public_repos/api-inference-community/docker_images/k2 | hf_public_repos/api-inference-community/docker_images/k2/tests/test_api.py | import os
from typing import Dict, List
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline

# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
    "automatic-speech-recognition": [
        "jtrmal/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13"
    ],
}

ALL_TASKS = {
    "automatic-speech-recognition",
}


class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
| 2 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/flair/requirements.txt | starlette==0.27.0
pydantic==1.8.2
flair @ git+https://github.com/flairNLP/flair@e17ab1234fcfed2b089d8ef02b99949d520382d2
api-inference-community==0.0.25
| 3 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/flair/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <[email protected]>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV FLAIR_CACHE_ROOT=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
| 4 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/flair/prestart.sh | python app/main.py
| 5 |
0 | hf_public_repos/api-inference-community/docker_images/flair | hf_public_repos/api-inference-community/docker_images/flair/app/main.py | import functools
import logging
import os
from typing import Dict, Type

from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TokenClassificationPipeline
from starlette.applications import Starlette
from starlette.routing import Route


logger = logging.getLogger(__name__)


# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "token-classification": TokenClassificationPipeline
}


@functools.lru_cache()
def get_pipeline() -> Pipeline:
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task not in ALLOWED_TASKS:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return ALLOWED_TASKS[task](model_id)


routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]

app = Starlette(routes=routes)
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    app.add_middleware(
        CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"]
    )


@app.on_event("startup")
async def startup_event():
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    app.get_pipeline = get_pipeline


if __name__ == "__main__":
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
| 6 |
0 | hf_public_repos/api-inference-community/docker_images/flair/app | hf_public_repos/api-inference-community/docker_images/flair/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any, Optional


class Pipeline(ABC):
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    pass
| 7 |
0 | hf_public_repos/api-inference-community/docker_images/flair/app | hf_public_repos/api-inference-community/docker_images/flair/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.token_classification import TokenClassificationPipeline
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/flair/app | hf_public_repos/api-inference-community/docker_images/flair/app/pipelines/token_classification.py | from typing import Any, Dict, List

from app.pipelines import Pipeline
from flair.data import Sentence, Span, Token
from flair.models import SequenceTagger


class TokenClassificationPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ):
        self.tagger = SequenceTagger.load(model_id)

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be like [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}] containing :
                - "entity_group": A string representing what the entity is.
                - "word": A substring of the original string that was detected as an entity.
                - "start": the offset within `input` leading to `answer`. context[start:stop] == word
                - "end": the ending offset within `input` leading to `answer`. context[start:stop] == word
                - "score": A score between 0 and 1 describing how confident the model is for this entity.
        """
        sentence: Sentence = Sentence(inputs)
        self.tagger.predict(sentence)

        entities = []
        for label in sentence.get_labels():
            current_data_point = label.data_point
            if isinstance(current_data_point, Token):
                current_entity = {
                    "entity_group": current_data_point.tag,
                    "word": current_data_point.text,
                    "start": current_data_point.start_position,
                    "end": current_data_point.end_position,
                    "score": current_data_point.score,
                }
                entities.append(current_entity)
            elif isinstance(current_data_point, Span):
                if not current_data_point.tokens:
                    continue
                current_entity = {
                    "entity_group": current_data_point.tag,
                    "word": current_data_point.text,
                    "start": current_data_point.tokens[0].start_position,
                    "end": current_data_point.tokens[-1].end_position,
                    "score": current_data_point.score,
                }
                entities.append(current_entity)

        return entities
| 9 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/red-teaming.md | ---
title: "为大语言模型建立红队对抗"
thumbnail: /blog/assets/red-teaming/thumbnail.png
authors:
- user: nazneen
- user: natolambert
- user: lewtun
translators:
- user: innovation64
- user: zhongdongy
proofreader: true
---
# 为大语言模型建立红队对抗
在巨量文本数据下训练的大语言模型非常擅长生成现实文本。但是,这些模型通常会显现出一些不良行为像泄露个人信息 (比如社会保险号) 和生成错误信息,偏置,仇恨或有毒内容。举个例子,众所周知,GPT3 的早期版本就表现出性别歧视 (如下图) 与 [仇恨穆斯林言论](https://dl.acm.org/doi/abs/10.1145/3461702.3462624) 的情况。
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/red-teaming/gpt3.png"/>
</p>
一旦我们在使用大语言模型时发现了这种不良结果,我们就可以制定一些策略来远离它们,像 [生成歧视者指导序列生成 (GEDI)](https://arxiv.org/pdf/2009.06367.pdf) 或 [即插即用语言模型 (PPLM)](https://arxiv.org/pdf/1912.02164.pdf) 都是用来指导 GPT3 生成的。以下是使用相同提示 (Prompt) 的示例,但使用 GEDI 控制 GPT3 生成。
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/red-teaming/gedi.png"/>
</p>
即使是最近的 GPT3 版本,也会在提示 (prompt) 注入攻击时产生类似的令人反感的内容,这变成了 [这篇博客](https://simonwillison.net/2022/Sep/12/prompt-injection/) 中讨论的下游应用程序的安全问题。
**红队** _是一种用于引出模型不良行为漏洞的评估形式。_ 越狱是另一个红队术语,指的是通过操控来突破大语言模型的限制。2016 年发布的 [微软聊天机器人 Tay](https://blogs.microsoft.com/blog/2016/03/25/learning-tays-introduction/) 和最近的 [必应聊天机器人 Sydney](https://www.nytimes.com/2023/02/16/technology/bing-chatbot-transcript.html),就是真实世界中因缺乏对基础 ML 模型进行红队评估而引发的灾难性案例。红队攻击的最初想法起源于军队中的对抗模拟和战争游戏。
红队语言模型的目标是制作一个提示 (prompt),该提示会触发模型生成有害内容。红队和同样知名的评估语言模型 _对抗攻击_ 有同也有异。相似之处在于红队和对抗攻击目标相同,即“攻击”或“欺骗”模型,以生成在现实世界中不想要的内容。但是对抗攻击很难让人理解,举例来说,通过将字符串 “aaabbbcc” 前缀到每个提示中,它会恶化模型性能。[Wallace 等人 2019 年的论文](https://arxiv.org/abs/1908.07125) 讨论了对各种 NLP 分类和生成任务的许多攻击的例子。在另一方面,红队的提示看起来更正常,像自然语言的提示。
红队攻击可以揭露模型的局限性,包括引起用户不适或者暴力、不合法的恶意内容。红队 (就像对抗攻击) 的输出通常会被用来训练模型去减少有害内容或远离不想要的内容。
由于红队需要创造性地思考可能的模型失败,巨大的搜索空间会导致资源紧张。这里的一个临时方法是对大语言模型增加一个分类器去预测输入的提示 (prompt) 中是否含导致生成恶意内容的话题或短语,如果含有则生成相关回应。这种策略过于谨慎,极大的限制了模型并且时常导致模型产生回避。所以在模型有帮助 (遵循指令) 与无害 (尽可能少的产生有害内容) 之间存在一个紧张关系。红队在这时就显得非常有用了。
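下面用一小段 Python 代码示意这种“在大语言模型前面加一个分类器来过滤提示”的临时做法。注意这只是一个草图: 其中的 `unitary/toxic-bert` 模型、打分阈值以及 `generate_fn` 占位参数都是为演示而假设的选择,并非正文所指的某个具体实现。
```python
# 仅为示意: 在生成之前先用一个现成的毒性分类器过滤提示。
# 模型名与阈值均为演示用的假设,并非正文描述的具体系统。
from transformers import pipeline

toxicity_clf = pipeline("text-classification", model="unitary/toxic-bert")

def guarded_generate(prompt: str, generate_fn, threshold: float = 0.5) -> str:
    """generate_fn 是任意一个“提示 -> 文本”的生成函数(此处为假设的占位参数)。"""
    result = toxicity_clf(prompt)[0]
    if result["score"] > threshold:
        # 回避式回应: 正如正文所说,这种策略过于谨慎,常导致模型产生回避
        return "抱歉,我无法协助处理这个请求。"
    return generate_fn(prompt)
```
这段草图也正体现了正文提到的矛盾: 过滤器越严格,模型就越倾向于回避,有帮助性随之下降。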
红队攻击可以是人力循环或者正在测试另一个语言模型有害输出的语言模型。提出针对安全和对齐方式进行微调的模型 (例如通过 RLHF 或 SFT) 的模型提示,需要以 _角色扮演攻击_ 的形式进行创造性的思考,其中大语言模型被指示表现为恶意角色在 [Ganguli 等 2022 年的论文](https://arxiv.org/pdf/2209.07858.pdf) 中。用代码而不是自然语言指示模型同样也可以揭露模型的学习的一些偏置。就像如下例子。
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/red-teaming/jb1.png"/>
</p>
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/red-teaming/jb0.png"/>
</p>
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/red-teaming/jb2.png"/>
</p>
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/red-teaming/jb3.png"/>
</p>
查看 [此](https://twitter.com/spiantado/status/1599462375887114240) 推文获取更多示例。
这里列出了在 ChatGPT 刺激大语言模型进行越狱的列表。
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/red-teaming/jailbreak.png"/>
</p>
红队大语言模型依旧是一个新的研究领域,但是上述提到的策略依旧可以在成功让这些模型“越狱”,并且有助于部署机器学习的产品。随着这些模型推陈出新、能力变强,开发可以不断适应的红队方法将变得至关重要。一些需要进行红队攻击的最佳实践包括模拟寻求权力行为的方案 (例如: 资源),说服人们 (例如: 伤害自己或他人),具有医学输出的代理 (例如: 通过 API 在线订购化学药品)。我们将这种可能性和物理后果的可能性称为 _关键威胁场景_。
在评估大语言模型中恶意行为的警示中,我们不知道它们的能力,毕竟它们不是故意训练去展示这种能力的 (涌现能力)。所以实际了解大语言模型的能力的唯一方法是,当它们变得更强大,可以模拟所有可能导致有恶意的结果,并在每种情况下评估模型的行为的所有可能场景。这意味着我们的模型的安全行为与我们的红队方法的强度相关联。
针对这一持续的红队的挑战,这里在数据集和最佳实践 (包括学术、工业和政府实体) 上进行了多组织合作的激励措施。共享信息的结构化过程可以使较小的实体在模型发布前进行红队攻击,从而使整个用户体验更安全。
**红队的开放数据集:**
1. Meta 的 [机器人对抗对话数据集](https://github.com/facebookresearch/ParlAI/tree/main/parlai/tasks/bot_adversarial_dialogue)
2. Anthropic 的 [红队尝试](https://huggingface.co/datasets/Anthropic/hh-rlhf/tree/main/red-team-attempts)
3. AI2 的 [RealToxicityPrompts](https://huggingface.co/datasets/allenai/real-toxicity-prompts)
**从过去的工作中寻找红队大语言模型相关的努力** (在 [Anthropic’s Ganguli et al. 2022](https://arxiv.org/abs/2209.07858) 和 [Perez et al. 2022](https://arxiv.org/abs/2202.03286) 两篇文章中)
1. 对带有有帮助、诚实、无害 (HHH) 行为的少样本提示 (few-shot prompted) 语言模型进行红队攻击,并 _不_ 比攻击普通语言模型更困难。
2. 攻击成功率与缩放模型大小没有明确的关系,除了 RLHF 模型在缩放时更难进行红队攻击。
3. 模型可能会通过回避表现的无害,在有帮助和无害之间存在权衡。
4. 人类在判断是否达成一次成功攻击的观点难以达成一致。
5. 成功率的分布在不同危害类别中有所差异,其中非暴力提示的成功率更高。
6. 众包 (crowdsourcing) 红队容易产生模板化 (template-y) 的提示 (例如: “给出一个以 X 开头的恶毒词语”),使得到的数据变得冗余。
**未来方向:**
1. 没有用于代码生成的开源红队数据集,它试图通过代码越狱模型,例如生成实现 DDOS 或后门攻击的程序。
2. 为关键威胁场景设计和实施大语言模型红队方案的策略。
3. 红队可能是资源密集的,无论是计算还是人力资源,因此将从共享策略,开源数据集以及可能的合作中获得更大的成功机会,从而受益。
4. 评估回避和有帮助之间的权衡。
5. 综合比较根据上述方案的利弊,找到红队方案的最优解集 (类似于 Anthropic 的 Constitutional AI)。
这些局限性和未来的方向清楚地表明,红队是现代大语言模型工作流程中亟待探索又至关重要的组成部分。这篇文章旨在号召大语言模型研究人员和 Hugging Face 开发者社区,希望大家在这些方面保持协作,共建安全、友好的世界:)
_致谢:_ 感谢 [Yacine Jernite](https://huggingface.co/yjernite) 关于在这篇博文中正确使用术语的实用建议。
| 0 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/community-datasets.md | ---
title: "数据好合:Argilla 和 Hugging Face Spaces 赋能社区合力构建更好的数据集"
thumbnail: /blog/assets/community-datasets/thumbnail.png
authors:
- user: davanstrien
- user: dvilasuero
guest: true
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# 数据好合: Argilla 和 Hugging Face Spaces 携手赋能社区合力构建更好的数据集
最近,Argilla 和 Hugging Face 共同 [推出](https://huggingface.co/posts/dvilasuero/680660181190026) 了 `Data is Better Together` 计划,旨在凝聚社区力量协力构建一个对提示进行排名的偏好数据集。仅用几天,我们就吸引了:
- 350 个社区贡献者参与数据标注
- 超过 11,000 个提示评分
你可通过 [进度面板](https://huggingface.co/spaces/DIBT/prompt-collective-dashboard) 了解最新的统计数据!
基于此,我们发布了 [`10k_prompts_ranked`](https://huggingface.co/datasets/DIBT/10k_prompts_ranked) 数据集,该数据集共有 1 万条提示,其中每条提示都附带用户的质量评分。我们希望后续能开展更多类似的项目!
本文将讨论为什么我们认为社区合作构建数据集至关重要,并邀请大家作为首批成员加入社区,[Argilla](https://argilla.io/) 和 Hugging Face 将共同支持社区开发出更好的数据集!
## “无数据,不模型”仍是颠扑不破的真理
数据对于训练出更好的模型仍具有至关重要的作用: [现有的研究](https://huggingface.co/papers/2402.05123) 及开源 [实验](https://argilla.io/blog/notus7b) 不断地证明了这一点,开源社区的实践也表明更好的数据才能训练出更好的模型。
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/17480bfba418032faec37da19e9c678ac9eeed43/blog/community-datasets/why-model-better.png" alt="Screenshot of datasets in the Hugging Face Hub"><br>
<em>问题</em>
</p>
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/17480bfba418032faec37da19e9c678ac9eeed43/blog/community-datasets/data-is-the-answer.png" alt="Screenshot of datasets in the Hugging Face Hub"><br>
<em>常见答案</em>
</p>
## 为什么需要社区合力构建数据集?
“数据对于机器学习至关重要”已获得广泛共识,但现实是对很多语言、领域和任务而言,我们仍然缺乏用于训练、评估以及基准测试的高质量数据集。解决这一问题的路径之一是借鉴 Hugging Face Hub 的经验,目前,社区已通过 Hugging Face Hub 共享了数千个模型、数据集及演示应用,开放的 AI 社区协力创造了这一令人惊叹的成果。我们完全可以将这一经验推广,促成社区协力构建下一代数据集,从而为构建下一代模型提供独特而宝贵的数据基础。
赋能社区协力构建和改进数据集的好处有:
- 无需任何机器学习或编程基础,人人皆能为开源机器学习的发展作出贡献。
- 可为特定语言创建聊天数据集。
- 可为特定领域开发基准数据集。
- 可创建标注者多样化的偏好数据集。
- 可为特定任务构建数据集。
- 可利用社区的力量协力构建全新的数据集。
重要的是,我们相信凭借社区的协力会构建出更好的数据集,同时也能让那些不会编码的人也能参与进来为 AI 的发展作贡献。
### 让人人都能参与
之前许多协力构建 AI 数据集的努力面临的挑战之一是如何赋能大家以高效完成标注任务。Argilla 作为一个开源工具,可让大家轻松地为 LLM 或小型特化模型创建数据集,而 Hugging Face Spaces 是一个用于构建和托管机器学习演示应用的平台。最近,Argilla 对 Spaces 上托管的 Argilla 实例增加了对 Hugging Face 账户验证的支持,有了这个,用户现在仅需几秒钟即可开始参与标注任务。
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://video.twimg.com/ext_tw_video/1757693043619004416/pu/vid/avc1/1068x720/wh3DyY0nMcRJaMki.mp4?tag=12"
></video>
</figure>
我们在创建 [`10k_prompts_ranked`](https://huggingface.co/datasets/DIBT/10k_prompts_ranked) 数据集时已对这个新的工作流进行了压力测试,我们已准备好支持社区创建新的协作数据集。
## 首批加入数据集共建社区!
我们对这个新的、简单的托管标注工作流的潜力深感兴奋。为了支持社区构建更好的数据集,Hugging Face 和 Argilla 邀请感兴趣的个人或社区作为首批成员加入我们的数据集构建者社区。
加入这个社区,你将可以:
- 创建支持 Hugging Face 身份验证的 Argilla Space。 Hugging Face 将为参与者提供免费的硬盘和增强型 CPU 资源。
- Argilla 和 Hugging Face 可提供额外的宣传渠道以助力项目宣传。
- 受邀加入相应的社区频道。
我们的目标是支持社区协力构建更好的数据集。我们对所有想法持开放态度,并愿竭尽所能支持社区协力构建更好的数据集。
## 我们在寻找什么样的项目?
我们愿意支持各种类型的项目,尤其是现存的开源项目。我们对专注于为目前开源社区中数据不足的语言、领域和任务构建数据集的项目尤其感兴趣。当前我们唯一的限制是主要针对文本数据集。如果你对多模态数据集有好想法,我们也很乐意听取你的意见,但我们可能无法在第一批中提供支持。
你的任务可以是完全开放的,也可以是向特定 Hugging Face Hub 组织的成员开放的。
如果你想成为首批成员,请加入 [Hugging Face Discord](http://hf.co/join/discord) 中的 `#data-is-better-together` 频道,并告诉我们你想构建什么数据集!
期待与大家携手共建更好的数据集! | 1 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/accelerate-v1.md | ---
title: "Accelerate 1.0.0"
thumbnail: /blog/assets/186_accelerate_v1/accelerate_v1_thumbnail.png
authors:
- user: muellerzr
- user: marcsun13
- user: BenjaminB
translators:
- user: hugging-hoi2022
- user: zhongdongy
proofreader: true
---
# Accelerate 1.0.0
## Accelerate 发展概况
在三年半以前、项目发起之初时,[Accelerate](https://github.com/huggingface/accelerate) 的目标还只是制作一个简单框架,通过一个低层的抽象来简化多 GPU 或 TPU 训练,以此替代原生的 PyTorch 训练流程:

自此,Accelerate 开始不断扩展,逐渐成为一个有多方面能力的代码库。当前,像 Llama 这样的模型已经达到了 405B 参数的量级,而 Accelerate 也致力于应对大模型和大规模训练所面临的诸多难题。这其中的贡献包括:
- [灵活、低层的训练 API](https://huggingface.co/docs/accelerate/basic_tutorials/migration): 支持在六种不同硬件设备 (CPU、GPU、TPU、XPU、NPU、MLU) 上训练,同时在代码层面保持 99% 原有训练代码不必改动。
- 简单易用的 [命令行界面](https://huggingface.co/docs/accelerate/basic_tutorials/launch): 致力于在不同硬件上进行配置,以及运行训练脚本。
- [Big Model Inference](https://huggingface.co/docs/accelerate/usage_guides/big_modeling) 功能,或者说是 `device_map="auto"` : 这使得用户能够在多种不同硬件设备上进行大模型推理,同时现在可以通过诸如高效参数微调 (PEFT) 等技术以较小计算量来训练大模型。
这三方面的贡献,使得 Accelerate 成为了 **几乎所有 Hugging Face 代码库** 的基础依赖,其中包括 `transformers` 、 `diffusers` 、 `peft` 、 `trl` 。
在 Accelerate 开发趋于稳定将近一年后的今天,我们正式发布了 Accelerate 1.0.0 —— Accelerate 的第一个发布候选版本。
本文将会详细说明以下内容:
1. 为什么我们决定开发 1.0 版本?
2. Accelerate 的未来发展,怎样结合 PyTorch 一同发展?
3. 新版本有哪些重大改变?如何迁移代码到新版本?
## 为什么要开发 1.0
发行这一版本的计划已经进行了一年多。Accelerate 的 API 集中于 Accelerator 一侧,配置简单,代码扩展性强。但是,我们仍然认识到 Accelerate 还存在诸多有待完成的功能,这包括:
- 为 MS-AMP 和 `TransformerEngine` 集成 FP8 支持 (详见 [这里](https://github.com/huggingface/accelerate/tree/main/benchmarks/fp8/transformer_engine) 和 [这里](https://github.com/huggingface/accelerate/tree/main/benchmarks/fp8/ms_amp))
- 支持在 DeepSpeed 中使用多个模型 (详见 [这里](https://huggingface.co/docs/accelerate/usage_guides/deepspeed_multiple_model))
- 使 `torch.compile` 支持大模型推理 API (需要 `torch>=2.5` )
- 集成 `torch.distributed.pipelining` 作为 [替代的分布式推理机制](https://huggingface.co/docs/accelerate/main/en/usage_guides/distributed_inference#memory-efficient-pipeline-parallelism-experimental)
- 集成 `torchdata.StatefulDataLoader` 作为 [替代的数据载入机制](https://github.com/huggingface/accelerate/blob/main/examples/by_feature/checkpointing.py)
通过在 1.0 版本中作出的改动,Accelerate 已经有能力在不改变用户 API 接口的情况下不断融入新的技术能力了。
## Accelerate 的未来发展
在 1.0 版本推出以后,我们将重点关注技术社区里的新技术,并寻找方法去融合进 Accelerate 中。可以预见,一些重大的改动将会不久发生在 PyTorch 生态系统中:
- 作为支持 DeepSpeed 多模型的一部分,我们发现虽然当前的 DeepSpeed 方案还能正常工作,但后续可能还是需要大幅度改动整体的 API。因为我们需要为任意多模型训练场景去制作封装类。
- 由于 [torchao](https://github.com/pytorch/ao) 和 [torchtitan](https://github.com/pytorch/torchtitan) 逐渐变得受欢迎,可以推测将来 PyTorch 可能会将这些集成进来成为一个整体。为了致力于更原生的 FP8 训练、新的分布式分片 API,以及支持新版 FSDP (FSDPv2),我们推测 Accelerate 内部和通用的很多 API 也将会更改 (希望改动不大)。
- 借助 `torchao` /FP8,很多新框架也带来了不同的理念和实现方法,来使得 FP8 训练有效且稳定 (例如 `transformer_engine` 、`torchao` 、`MS-AMP` 、`nanotron` )。针对 Accelerate,我们的目标是把这些实现都集中到一个地方,使用简单的配置方法让用户探索和试用每一种方法,最终我们希望形成稳定灵活的代码架构。这个领域发展迅速,尤其是 NVidia 的 FP4 技术即将问世。我们希望不仅能够支持这些方法,同时也为不同方法提供可靠的基准测试,来和原生的 BF16 训练对比,以显示技术趋势。
我们也对 PyTorch 社区分布式训练的发展感到期待,希望 Accelerate 紧跟步伐,为最近技术提供一个低门槛的入口。也希望社区能够继续探索实验、共同学习,让我们寻找在复杂计算系统上训练、扩展大模型的最佳方案。
## 如何使用 1.0 版本
如想使用 1.0 版本,需要先使用如下方法获取 Accelerate:
- pip:
```bash
pip install --pre accelerate
```
- Docker:
```bash
docker pull huggingface/accelerate:gpu-release-1.0.0rc1
```
可用的版本标记有:
- `gpu-release-1.0.0rc1`
- `cpu-release-1.0.0rc1`
- `gpu-fp8-transformerengine-release-1.0.0rc1`
- `gpu-deepspeed-release-1.0.0rc1`
## 代码迁移指南
下面是关于弃用 API 的详细说明,列表之后附有一个简单的迁移示例:
- 给 `Accelerator()` 传递 `dispatch_batches` 、`split_batches` 、`even_batches` 、`use_seedable_sampler` 参数的这种方式已经被弃用。新的方法是创建一个 `accelerate.utils.DataLoaderConfiguration()` 然后传给 `Accelerator()` (示例: `Accelerator(dataloader_config=DataLoaderConfiguration(...))` )。
- `Accelerator().use_fp16` 和 `AcceleratorState().use_fp16` 已被移除。新的替代方式是检查 `accelerator.mixed_precision == "fp16"` 。
- `Accelerator().autocast()` 不再接收 `cache_enabled` 参数。该参数被包含在 `AutocastKwargs()` 里 (示例: `Accelerator(kwargs_handlers=[AutocastKwargs(cache_enabled=True)])` )。
- `accelerate.utils.is_tpu_available` 被 `accelerate.utils.is_torch_xla_available` 替代。
- `accelerate.utils.modeling.shard_checkpoint` 应被 `huggingface_hub` 里的 `split_torch_state_dict_into_shards` 替代。
- `accelerate.tqdm.tqdm()` 的第一个参数不再是 `True` /`False` ,`main_process_only` 需要以命名参数的形式传参。
- `ACCELERATE_DISABLE_RICH` 不再是一个有效的环境变量。用户需通过设置 `ACCELERATE_ENABLE_RICH=1` 手动启动详细的回溯 (traceback) 信息。
- FSDP 中的 `fsdp_backward_prefetch_policy` 已被 `fsdp_backward_prefetch` 代替。
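下面是一个对应上述前两条弃用说明的简化迁移示意 (并非官方迁移脚本,参数取值仅作演示):
```python
# 仅为迁移示意,对应上面列出的弃用说明;参数取值仅作演示。
from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

# 旧写法 (1.0 之前):
# accelerator = Accelerator(dispatch_batches=True, split_batches=False)
# use_fp16 = accelerator.use_fp16

# 新写法 (1.0 及之后): DataLoader 相关参数统一放进 DataLoaderConfiguration
dataloader_config = DataLoaderConfiguration(dispatch_batches=True, split_batches=False)
accelerator = Accelerator(dataloader_config=dataloader_config)

# use_fp16 已被移除,改为检查 mixed_precision
use_fp16 = accelerator.mixed_precision == "fp16"
```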
## 总结
首先感谢使用 Accelerate,看到一个小的想法转变成一个总下载量超过一亿、日均下载量接近三十万的项目还是很令人惊叹的。
通过本版发行,我们希望社区能够踊跃尝试,尽快在官方发行版出现前迁移到 1.0 版本。
请大家持续关注,及时追踪我们 [GitHub](https://github.com/huggingface/accelerate) 和 [社交软件](https://x.com/TheZachMueller) 上的最新信息。 | 2 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/encrypted-llm.md | ---
title: "使用 FHE 实现加密大语言模型"
thumbnail: /blog/assets/encrypted-llm/thumbnail.png
authors:
- user: RomanBredehoft
guest: true
- user: jfrery-zama
guest: true
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# 使用 FHE 实现加密大语言模型
近来,大语言模型 (LLM) 已被证明是提高编程、内容生成、文本分析、网络搜索及远程学习等诸多领域生产力的可靠工具。
## 大语言模型对用户隐私的影响
尽管 LLM 很有吸引力,但如何保护好 `输入给这些模型的用户查询中的隐私` 这一问题仍然存在。一方面,我们想充分利用 LLM 的力量,但另一方面,存在向 LLM 服务提供商泄露敏感信息的风险。在某些领域,例如医疗保健、金融或法律,这种隐私风险甚至有一票否决权。
一种备选解决方案是本地化部署,LLM 所有者将其模型部署在客户的计算机上。然而,这不是最佳解决方案,因为构建 LLM 可能需要花费数百万美元 ([GPT3 为 460 万美元](https://lambdalabs.com/blog/demystifying-gpt-3)),而本地部署有泄露模型知识产权 (intellectual property, IP) 的风险。
Zama 相信有两全其美之法: 我们的目标是同时保护用户的隐私和模型的 IP。通过本文,你将了解如何利用 Hugging Face transformers 库并让这些模型的某些部分在加密数据上运行。完整代码见 [此处](https://github.com/zama-ai/concrete-ml/tree/17779ca571d20b001caff5792eb11e76fe2c19ba/use_case_examples/llm)。
## 全同态加密 (Fully Homomorphic Encryption,FHE) 可以解决 LLM 隐私挑战
针对 LLM 部署的隐私挑战,Zama 的解决方案是使用全同态加密 (FHE),在加密数据上执行函数。这种做法可以实现两难自解,既可以保护模型所有者知识产权,同时又能维护用户的数据隐私。我们的演示表明,在 FHE 中实现的 LLM 模型保持了原始模型的预测质量。为此,我们需要调整 Hugging Face [transformers 库](https://github.com/huggingface/transformers) 中的 [GPT2](https://huggingface.co/gpt2) 实现,使用 Concrete-Python 对推理部分进行改造,这样就可以将 Python 函数转换为其 FHE 等效函数。

图 1 展示了由多个 transformer block 堆叠而成的 GPT2 架构: 其中最主要的是多头注意力 (multi-head attention,MHA) 层。每个 MHA 层使用模型权重来对输入进行投影,然后各自计算注意力,并将注意力的输出重新投影到新的张量中。
在 [TFHE](https://www.zama.ai/post/tfhe-deep-dive-part-1) 中,模型权重和激活均用整数表示。非线性函数必须通过可编程自举 (Programmable Bootstrapping,PBS) 操作来实现。PBS 对加密数据实施查表 (table lookup,TLU) 操作,同时刷新密文以支持 [任意计算](https://whitepaper.zama.ai/)。不好的一面是,此时 PBS 的计算时间在线性运算中占主导地位。利用这两种类型的运算,你可以在 FHE 中表达任何子模型的计算,甚至完整的 LLM 计算。
## 使用 FHE 实现 LLM 的一层
接下来,你将了解如何加密多头注意力 (MHA) 中的一个注意力头。你可以在 [此处](https://github.com/zama-ai/concrete-ml/tree/17779ca571d20b001caff5792eb11e76fe2c19ba/use_case_examples/llm) 找到完整的 MHA 实现代码。

图 2 概述了一个简化的底层实现。在这个方案中,模型权重会被分成两个部分,分别存储在客户端和服务端。首先,客户端在本地开始推理,直至遇到第一个不在本地的层。用户将中间结果加密并发送给服务端。服务端对其执行相应的注意力机制计算,然后将结果返回给客户端,客户端对结果进行解密并继续在本地推理。
### 量化
首先,为了对加密值进行模型推理,模型的权重和激活必须被量化并转换为整数。理想情况是使用 [训练后量化](https://docs.zama.ai/concrete-ml/advanced-topics/quantization),这样就不需要重新训练模型了。这里,我们使用整数和 PBS 来实现 FHE 兼容的注意力机制,并检查其对 LLM 准确率的影响。
要评估量化的影响,我们运行完整的 GPT2 模型,并让其中的一个 LLM 头进行密态计算。然后我们基于此评估权重和激活的量化比特数对准确率的影响。

上图表明 4 比特量化保持了原始精度的 96%。该实验基于含有约 80 个句子的数据集,并通过将原始模型的 logits 预测与带有量化注意力头的模型的 logits 预测进行比较来计算最终指标。
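为了更直观地说明“把浮点权重/激活映射为整数”这一步,下面给出一个极简的对称 n 比特训练后量化草图。注意这只是示意代码,并不是 Concrete-ML 实际使用的量化实现 (真实实现见上文链接的代码):
```python
# 仅为示意: 最简化的对称 n 比特训练后量化,非 Concrete-ML 的实际实现。
import numpy as np

def quantize_symmetric(x: np.ndarray, n_bits: int = 4):
    """把浮点张量映射到 [-(2^(n-1)-1), 2^(n-1)-1] 的整数区间,并返回量化步长。"""
    q_max = 2 ** (n_bits - 1) - 1
    scale = np.abs(x).max() / q_max
    q_x = np.clip(np.round(x / scale), -q_max, q_max).astype(np.int64)
    return q_x, scale

def dequantize(q_x: np.ndarray, scale: float) -> np.ndarray:
    return q_x * scale

w = np.random.randn(768, 64).astype(np.float32)  # 假设这是某个注意力头的投影权重
q_w, scale = quantize_symmetric(w, n_bits=4)
print("最大量化误差:", np.abs(w - dequantize(q_w, scale)).max())
```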
### 在 Hugging Face GPT2 模型中使用 FHE
我们需要在 Hugging Face 的 transformers 库的基础上重写加密模块的前向传播,以使其包含量化算子。首先通过加载 [GPT2LMHeadModel](https://huggingface.co/docs/transformers/model_doc/gpt2#transformers.GPT2LMHeadModel) 构建一个 SingleHeadQGPT2Model 实例,然后手动使用 [QGPT2SingleHeadAttention](https://github.com/zama-ai/concrete-ml/blob/c291399cb1f2a0655c308c14e2180eb2ffda0ab7/use_case_examples/llm/qgpt2_models.py#L191) 替换第一个多头注意力模块,代码如下。你可以在 [这里](https://github.com/zama-ai/concrete-ml/blob/c291399cb1f2a0655c308c14e2180eb2ffda0ab7/use_case_examples/llm/qgpt2_models.py) 找到模型的完整实现。
```python
self.transformer.h[0].attn = QGPT2SingleHeadAttention(config, n_bits=n_bits)
```
至此,前向传播已被重载成用 FHE 算子去执行多头注意力的第一个头,包括构建查询、键和值矩阵的投影。以下代码中的 `QGPT2` 模块的代码见 [此处](https://github.com/zama-ai/concrete-ml/blob/c291399cb1f2a0655c308c14e2180eb2ffda0ab7/use_case_examples/llm/qgpt2_class.py#L196)。
```python
class SingleHeadAttention(QGPT2):
"""Class representing a single attention head implemented with quantization methods."""
def run_numpy(self, q_hidden_states: np.ndarray):
# Convert the input to a DualArray instance
q_x = DualArray(
float_array=self.x_calib,
int_array=q_hidden_states,
quantizer=self.quantizer
)
# Extract the attention base module name
mha_weights_name = f"transformer.h.{self.layer}.attn."
# Extract the query, key and value weight and bias values using the proper indices
head_0_indices = [
list(range(i * self.n_embd, i * self.n_embd + self.head_dim))
for i in range(3)
]
q_qkv_weights = ...
q_qkv_bias = ...
# Apply the first projection in order to extract Q, K and V as a single array
q_qkv = q_x.linear(
weight=q_qkv_weights,
bias=q_qkv_bias,
key=f"attention_qkv_proj_layer_{self.layer}",
)
# Extract the queries, keys and values
q_qkv = q_qkv.expand_dims(axis=1, key=f"unsqueeze_{self.layer}")
q_q, q_k, q_v = q_qkv.enc_split(
3,
axis=-1,
key=f"qkv_split_layer_{self.layer}"
)
# Compute attention mechanism
q_y = self.attention(q_q, q_k, q_v)
return self.finalize(q_y)
```
模型中的其他计算仍以浮点形式进行,未加密,并由客户端在本地执行。
将预训练的权重加载到修改后的 GPT2 模型中,然后调用 _generate_ 方法:
```python
qgpt2_model = SingleHeadQGPT2Model.from_pretrained(
"gpt2_model", n_bits=4, use_cache=False
)
output_ids = qgpt2_model.generate(input_ids)
```
举个例子,你可以要求量化模型补全短语 “Cryptography is a” 。在 FHE 中运行模型时,如果量化精度足够,生成的输出为:
“Cryptography is a very important part of the security of your computer”
当量化精度太低时,您会得到:
“Cryptography is a great way to learn about the world around you”
### 编译为 FHE
现在,你可以使用以下 Concrete-ML 代码编译注意力头:
```python
circuit_head = qgpt2_model.compile(input_ids)
```
运行此代码,你将看到以下打印输出: “Circuit compiled with 8 bit-width”。该配置与 FHE 兼容,显示了在 FHE 中执行的操作所需的最大位宽。
### 复杂度
在 transformer 模型中,计算量最大的操作是注意力机制,它将查询、键和值相乘。在 FHE 中,加密域中乘法的特殊性加剧了成本。此外,随着序列长度的增加,这些乘法的数量还会呈二次方增长。
而就加密注意力头而言,长度为 6 的序列需要 11622 次 PBS 操作。我们目前的实验还很初步,尚未对性能进行优化。虽然可以在几秒钟内运行,但不可否认它需要相当多的计算能力。幸运的是,我们预期,几年后,硬件会将延迟改善 1000 到 10000 倍,使原来在 CPU 上需要几分钟的操作缩短到 ASIC 上的低于 100 毫秒。有关这些估算的更多信息,请参阅 [此博文](https://www.zama.ai/post/chatgpt-privacy-with-homomorphic-encryption)。
## 总结
大语言模型有望使能大量应用场景,但其实现引发了用户隐私的重大关切。在本文中,我们朝着密态 LLM 迈出了第一步,我们的最终愿景是让整个模型完全在云上运行,同时用户的隐私还能得到充分尊重。
当前的做法包括将 GPT2 等模型中的特定部分转换至 FHE 域。我们的实现利用了 transformers 库,用户还能评估模型的一部分在加密数据上运行时对准确率的影响。除了保护用户隐私之外,这种方法还允许模型所有者对其模型的主要部分保密。你可在 [此处](https://github.com/zama-ai/concrete-ml/tree/17779ca571d20b001caff5792eb11e76fe2c19ba/use_case_examples/llm) 找到完整代码。
Zama 库 [Concrete](https://github.com/zama-ai/concrete) 和 [Concrete-ML](https://github.com/zama-ai/concrete-ml) (别忘了给我们的 github 代码库点个星星 ⭐️💛) 允许直接构建 ML 模型并将其转换至等价的 FHE 域,从而使之能够对加密数据进行计算和预测。
希望你喜欢这篇文章。请随时分享你的想法/反馈!
| 3 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/sdxl_lora_advanced_script.md | ---
title: "全世界 LoRA 训练脚本,联合起来!"
thumbnail: /blog/assets/dreambooth_lora_sdxl/thumbnail.png
authors:
- user: LinoyTsaban
- user: multimodalart
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# 全世界 LoRA 训练脚本,联合起来!
**来自社区的 SD-XL Dreambooth LoRA 微调最佳实践指南**
**太长不看版**
我们把 Replicate 在 SDXL Cog 训练器中使用的枢轴微调 (Pivotal Tuning) 技术与 Kohya 训练器中使用的 Prodigy 优化器相结合,再加上一堆其他优化,一起对 SDXL 进行 Dreambooth LoRA 微调,取得了非常好的效果。你可以在 `diffusers` 上找到 [我们使用的训练脚本](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py)🧨,或是直接 [在 Colab 上](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb) 试着运行一下。
如果你想跳过技术讲解直接上手,可以使用这个 [Hugging Face Space](https://huggingface.co/spaces/multimodalart/lora-ease),通过简单的 UI 界面用我们精选的超参直接开始训练。当然,你也可以尝试干预这些超参的设置。
## 概述
使用 Dreambooth LoRA 微调后的 Stable Diffusion XL(SDXL) 模型仅需借助少量图像即可捕获新概念,同时保留了 SDXL 出图美观高质的优势。更难得的是,虽然微调结果令人惊艳,其所需的计算和资源消耗却相当低。你可在 [此处](https://huggingface.co/spaces/multimodalart/LoraTheExplorer) 找到很多精彩的 SDXL LoRA 模型。
本文我们将回顾一些流行的、可以让你的 LoRA 微调更出色的实践和技术,并展示如何使用 `diffusers` 来运行或训练你自己的 LoRA 模型!
拾遗: LoRA (Low Rank Adaptation,低阶适配) 是一种可用于微调 Stable Diffusion 模型的技术,其可用于对关键的图像/提示交叉注意力层进行微调。其效果与全模型微调相当,但速度更快且所需计算量更小。要了解有关 LoRA 工作原理的更多信息,请参阅我们之前的文章 - [使用 LoRA 进行 Stable Diffusion 的高效参数微调](https://huggingface.co/blog/zh/lora)。
## 目录
1. 技术/技巧
1. [枢轴微调](#枢轴微调)
2. [自适应优化器](#自适应优化器)
3. [推荐做法](#其他好做法) - 文本编码器学习率,自定义描述文本,数据集重复,最小信噪比 Gamma,训练集创建
2. [实验与结果](#实验与结果)
3. 推理
1. [Diffusers 推理](#推理)
2. [Automatic1111/ComfyUI 推理](#comfy-ui--automatic1111-推理)
**致谢** ❤️:
本文展示的各种技术 (包括算法、训练脚本、实验以及各种探索) 是站在很多前人工作的基础上的,包括: [Nataniel Ruiz](https://twitter.com/natanielruizg) 的 [Dreambooth](https://dreambooth.github.io)、 [Rinon Gal](https://twitter.com/RinonGal) 的 [文本逆化 (textual inversion) ](https://textual-inversion.github.io)、[Ron Mokady](https://twitter.com/MokadyRon) 的 [枢轴微调](https://arxiv.org/abs/2106.05744)、[Simo Ryu](https://twitter.com/cloneofsimo) 的 [cog-sdxl](https://github.com/replicate/cog-sdxl)、[Kohya](https://twitter.com/kohya_tech/) 的 [sd-scripts](https://github.com/kohya-ss/sd-scripts) 以及 [The Last Ben](https://twitter.com/__TheBen) 的 [fast-stable-diffusion](https://github.com/TheLastBen/fast-stable-diffusion)。在此,我们向他们和社区表示最诚挚的谢意! 🙌
## 枢轴微调
[枢轴微调](https://arxiv.org/abs/2106.05744) 技术巧妙地将 [文本逆化](https://arxiv.org/abs/2208.01618) 与常规的扩散模型微调相结合。以 Dreambooth 为例,进行常规 Dreambooth 微调时,你需要选择一个稀有词元作为触发词,例如“一只 sks 狗” 中的 `sks` 。但是,因为这些词元原本就来自于词表,所以它们通常有自己的原义,这就有可能会影响你的结果。举个例子,社区之前经常使用 `sks` 作为触发词,但实际上其原义是一个武器品牌。
为了解决这个问题,我们插入一个新词元到模型的文本编码器中,而非重用词表中现有的词元。然后,我们优化新插入词元的嵌入向量来表示新概念,这种想法就是文本逆化,即我们对嵌入空间中的新词元进行学习来达到学习新概念的目的。一旦我们获得了新词元及其对应的嵌入向量,我们就可以用这些词元嵌入向量来训练我们的 Dreambooth LoRA,以获得两全其美的效果。
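下面的代码片段示意了“向文本编码器插入新词元并只训练其嵌入”这一步大致是怎么做的。这只是一个基于 transformers 通用 API 的简化草图,与 `diffusers` 训练脚本中的实际实现细节可能不同:
```python
# 仅为示意: 向 SDXL 的第一个分词器/文本编码器插入新词元 <s0><s1> 的简化草图,
# 实际训练脚本会对两个文本编码器都做同样处理。
from transformers import CLIPTextModel, CLIPTokenizer

base = "stabilityai/stable-diffusion-xl-base-1.0"
tokenizer = CLIPTokenizer.from_pretrained(base, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(base, subfolder="text_encoder")

new_tokens = ["<s0>", "<s1>"]  # 对应 --num_new_tokens_per_abstraction=2
tokenizer.add_tokens(new_tokens)
text_encoder.resize_token_embeddings(len(tokenizer))

# 文本逆化: 只优化这几行新词元对应的嵌入,其余权重保持冻结
new_token_ids = tokenizer.convert_tokens_to_ids(new_tokens)
embeddings = text_encoder.get_input_embeddings()
print(new_token_ids, embeddings.weight.shape)
```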
**训练**
使用 `diffusers` 的新训练脚本,你可以通过设置以下参数来进行文本逆化训练。
```
--train_text_encoder_ti
--train_text_encoder_ti_frac=0.5
--token_abstraction="TOK"
--num_new_tokens_per_abstraction=2
--adam_weight_decay_text_encoder
```
- `train_text_encoder_ti` 开启文本逆化训练,用于训练新概念的嵌入向量。
- `train_text_encoder_ti_frac` 指定何时停止文本逆化 (即停止文本嵌入向量的更新,仅继续更新 UNet)。中途定轴 (即仅在训练前半部分执行文本逆化) 是 cog sdxl 使用的默认设置,我们目前的实验也验证了其有效性。我们鼓励大家对此参数进行更多实验。
- `token_abstraction` 即概念标识符,我们在提示文本中用该词描述我们希望训练的概念。该标识符词元会用在实例提示、验证提示或图像描述文本中。这里我们选择 `TOK` 作为概念标识符,如 “TOK 的照片”即为一个含有概念标识符的实例提示。注意,`--token_abstraction` 只是一个占位符,因此,在训练之前我们需要用一个新词元来代替 `TOK` 并对其进行训练 (举个例子,训练时“一张 `TOK` 的照片”会变成“一张 `<s0><s1>` 的照片”,其中 `<s0><s1>` 就是新词元)。同样地,需要确保这里的 `token_abstraction` 与实例提示、验证提示和自定义提示 (如有) 中的标识符相一致。
- `num_new_tokens_per_abstraction` 表示每个 `token_abstraction` 对应多少个新词元 - 即需要向模型的文本编码器插入多少个新词元并对其进行训练。默认设置为 2,我们鼓励大家对不同取值进行实验并分享你们的发现!
- `adam_weight_decay_text_encoder` 用于为文本编码器设置与 UNet 不同的权重衰减。
## 自适应优化器
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 40%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/optimization_gif.gif"
></image>
</figure>
在训练或微调扩散模型 (或与此相关的任何机器学习模型) 时,我们使用优化器来引导模型依循最佳的收敛路径 - 收敛意味着我们选择的损失函数达到了最小值,我们认为损失函数达到最小值即说明模型已习得我们想要教给它的内容。当前,深度学习任务的标准 (也是最先进的) 优化器当属 Adam 和 AdamW 优化器。
然而,这两个优化器要求用户设置大量的超参 (如学习率、权重衰减等),以此为收敛铺平道路。这可能会导致我们需要不断试验各种超参,最后常常因为耗时过长而不得不采用次优超参,从而导致次优结果。即使你最后试到了理想的学习率,但如果学习率在训练期间保持为常数,仍然可能导致收敛问题。一些超参可能需要频繁的更新以加速收敛,而另一些超参的调整又不能太大以防止振荡。真正是“摁了葫芦起了瓢”。为了应对这一挑战,我们引入了有自适应学习率的算法,例如 **Adafactor** 和 [**Prodigy**](https://github.com/konstmish/prodigy)。这些方法根据每个参数过去的梯度来动态调整学习率,借此来优化算法对搜索空间的遍历轨迹。
我们的关注点更多在 Prodigy,因为我们相信它对 Dreambooth LoRA 训练特别有用!
**训练**
```
--optimizer="prodigy"
```
使用 Prodigy 时,缺省学习率可以设置如下:
```
--learning_rate=1.0
```
对扩散模型特别是其 LoRA 训练有用的设置还有:
```
--prodigy_safeguard_warmup=True
--prodigy_use_bias_correction=True
--adam_beta1=0.9
# 注意,以下超参的取值与默认值不同:
--adam_beta2=0.99
--adam_weight_decay=0.01
```
在使用 Prodigy 进行训练时,你还可以对其他超参进行调整 (如: `--prodigy_beta3` 、`prodigy_decouple` 、`prodigy_safeguard_warmup` ),本文不会对这些参数进行深入研究,你可以移步 [此处](https://github.com/konstmish/prodigy) 以了解更多信息。
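如果你想在自己的训练循环中直接使用 Prodigy (而不是通过上面的命令行参数),可以参考下面这个最小草图。假设已经安装了 `prodigyopt` 包,参数取值对应上文的推荐设置:
```python
# 仅为示意: 直接实例化 Prodigy 优化器的最小草图 (pip install prodigyopt)。
import torch
from prodigyopt import Prodigy

params = torch.nn.Linear(16, 16)  # 用一个占位模块代替真正的 LoRA 参数
optimizer = Prodigy(
    params.parameters(),
    lr=1.0,                      # 自适应优化器通常从 1.0 开始
    betas=(0.9, 0.99),
    weight_decay=0.01,
    use_bias_correction=True,
    safeguard_warmup=True,
)

x, y = torch.randn(4, 16), torch.randn(4, 16)
loss = torch.nn.functional.mse_loss(params(x), y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
```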
## 其他好做法
除了枢轴微调以及自适应优化器之外,还有一些可以影响 LoRA 模型质量的其他技巧,所有这些技巧都已在最新的 `diffusers` 训练脚本中了。
### 独立设置文本编码器和 UNet 的学习率
社区发现,在优化文本编码器时,为其设置专有的学习率 (与 UNet 学习率独立) 所得模型的生成质量会更好 - 特别地,文本编码器需要 **较低** 的学习率,因为它一般过拟合 _更快_ 。
- 在使用枢轴微调时,为 UNet 和文本编码器设置不同的学习率是已被证明了的 - 此时,为文本编码器设置更高的学习率更好。
- 但请注意,当使用 Prodigy (或一般的自适应优化器) 时,我们一开始可以让所有训练参数的初始学习率相同,让优化器自行调节。很神吧✨!
**训练**
```
--train_text_encoder
--learning_rate=1e-4 #unet
--text_encoder_lr=5e-5
```
`--train_text_encoder` 启用文本编码器全模型训练 (即文本编码器的权重都参与更新,而不是仅仅优化文本逆化新词元的嵌入 ( `--train_text_encoder_ti` ))。如果你希望文本编码器的学习率始终与 `--learning_rate` 一致,可设 `--text_encoder_lr=None` 。
### 自定义描述文本
虽然通过对一组配有相同实例提示的图像进行训练也可以获得良好的结果,如“一张 <token> 的人像照片”或 “<token> 的风格”等。但对多幅图像使用相同的描述文本有可能会导致次优结果。具体效果取决于很多因素,包括待学习概念的复杂性、模型原本对该概念的“熟悉程度”,以及训练集捕获新概念的效果如何。
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 40%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/custom_captions_meme.png"
></image>
</figure>
**训练**
想要使用自定义描述文本,首先需要安装 `datasets` 库,如下:
```
!pip install datasets
```
要加载自定义图像描述文本,我们需要训练集的目录结构遵循 `datasets` 库的 `ImageFolder` 结构,其包含图像及每张图像对应的描述文本。
- _方式 1_:
从 hub 中选择一个已包含图像及其对应提示的数据集 - 如 [LinoyTsaban/3d_icon](https://huggingface.co/datasets/LinoyTsaban/3d_icon)。现在要做的就是在训练参数中指定数据集的名称及文本列的名称 (在本例中列名为 "prompt"):
```
--dataset_name=LinoyTsaban/3d_icon
--caption_column=prompt
```
- _方式 2_:
你还可以使用自己的图像并为其添加描述文本。此时,你可以借助 [这个 Colab Notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb) 来用 BLIP 自动为图像生成描述文本,或者你可以在元数据文件中手动创建描述文本。后面的事情就和方式 1 一样了,将 `--dataset_name` 设为文件夹路径, `--caption_column` 设为描述文本所对应的列名。
### 最小信噪比 Gamma 加权
训练扩散模型通常会遇到收敛缓慢的问题,部分是由于各时间步之间的优化方向相互冲突。[Hang 等人](https://arxiv.org/abs/2303.09556) 通过引入简单的最小信噪比 Gamma 法来缓解此问题。该方法根据钳位信噪比调整每个时间步的损失权重,以有效平衡各时间步之间的冲突。
- 最小信噪比加权策略在小数据集上效果可能并不明显; 其对较大的数据集效果会更明显。
- 不同 $\gamma$ 值的效果比较: _你可在 [这个 wandb 项目页面](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) 上比较不同的 `snr_gamma` 值 (5.0、1.0 及 None) 下的训练损失曲线。_
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 70%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/snr_gamma_effect.png"
></image>
</figure>
**训练**
如欲使用最小信噪比 Gamma 法,推荐设置如下:
```
--snr_gamma=5.0
```
默认情况下 `--snr_gamma=None` ,即不启用。启用 `--snr_gamma` 时,建议取值为 5.0。
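下面用几行代码示意最小信噪比加权的核心计算 (针对 epsilon 预测的情形)。这只是一个草图,`diffusers` 训练脚本中的实际实现可能在细节上有所不同:
```python
# 仅为示意: Min-SNR 加权的核心计算,实际实现细节以 diffusers 训练脚本为准。
import torch

def min_snr_loss_weights(timesteps, alphas_cumprod, snr_gamma=5.0):
    alpha_bar = alphas_cumprod[timesteps]
    snr = alpha_bar / (1.0 - alpha_bar)           # SNR(t) = alpha_bar_t / (1 - alpha_bar_t)
    return torch.clamp(snr, max=snr_gamma) / snr  # 权重 = min(SNR, gamma) / SNR

# 用法示意: 逐样本 MSE 损失乘以对应权重后再取平均
# loss = (min_snr_loss_weights(t, alphas_cumprod) * mse_per_sample).mean()
```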
### 图像样本重复次数
此参数指的是数据集中的图像在训练集中重复的次数。其与 epoch 的不同之处在于,图像首先被重复,然后才被打乱。
**训练**
要启用重复,只需将其设为一个大于 1 的整数:
```
--repeats
```
默认情况下, `--repeats=1` ,即不重复。
### 训练集创建
- 俗话说得好 - “垃圾进,垃圾出”,虽然仅需少量图像即可轻松训练一个好的 Dreambooth LoRA,但训练图像的质量对微调模型影响很大。
- 一般来说,在对特定对象或主题进行微调时,我们希望确保训练集中包含尽可能多样的关于该对象或主题的图像,且这些图像应尽量与后续应用场景相关。
- 例如,如果想让模型学到有关下面这个红色背包的知识: (该图来自 [google/dreambooth](https://huggingface.co/datasets/google/dreambooth) 数据集)
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 30%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/dreambooth_backpack_01.jpg"
></image>
</figure>
- 我觉得后面用户有可能给一个某人背着包的提示,因此训练集中最好有下面这样的样本:
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 30%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/dreambooth_backpack_02.jpg"
></image>
</figure>
这样就能在训练时匹配推理时的应用场景,因此模型推理时就更容易外推至该类场景或者设定。
_再举个例子_, 在 _人脸_ 数据上进行训练时,在准备数据集时需要特别注意以下几点:
1. 应尽可能选择 **高分辨率、高画质** 的图像。模糊或低分辨率的图像不利于微调结果。
2. 在训练特定人物的人脸时,建议训练集中不要出现其他人的脸,因为我们不想对目标人脸产生模糊的概念。
3. **特写照片** 对于最终产生真实感的效果很重要,但是同时也应该包含好的全身照片,以提高对不同姿势/构图的泛化能力。
4. 我们建议 **避免纳入离拍摄主体较远的照片**,因为此类图像中的大多数像素与我们希望优化的概念无关,模型可以从中学习的东西不多。
5. 避免重复的背景/服装/姿势 —— 在灯光、姿势、背景和面部表情方面力求 **多样性**。多样性越大,LoRA 就越灵活、越通用。
6. **先验保留损失**
先验保留损失是一种使用模型自己生成的样本来帮助其学习如何生成更多样化的图像的方法。由于这些图像样本与你提供的图像属于同一类,因此它们可以帮助模型保留已习得的有关该类的信息,并习得如何使用已知的该类的信息来形成新的组合。
**_用于正则化的真实图像 VS 模型生成的图像_**
选择类图像时,你可以在合成图像 (即由扩散模型生成) 和真实图像之间进行选择。支持使用真实图像的理由是它们提高了微调模型的真实感。另一方面,有些人也会认为使用模型生成的图像可以更好地保留模型习得的 <em> 知识 </em> 及审美。
7. **名人相** - 这主要与用于训练的描述文本或实例提示有关。当使用“令牌标识符 + 基础模型所知道的与待训练任务相似的公众人物”进行提示时,我们发现一些案例的微调效果得到了改善。
使用先验保留损失进行 **训练**:
```
--with_prior_preservation
--class_data_dir
--num_class_images
--class_prompt
```
`--with_prior_preservation` - 启用先验保留训练
`--class_data_dir` - 包含类图像的文件夹的路径
`--num_class_images` - 先验保留损失所需的最小类图像数。如果 `--class_data_dir` 中没有足够的图像,则用 `--class_prompt` 采样出更多的图像。
### 实验与结果
讲了这么多,该挽起袖子试试了。我们在不同的任务 (风格化、人脸及特定对象) 上尝试了上述技术的不同组合。
为了减小超参搜索空间,我们使用一些流行配置作为起点,并基于此进行调整以达成效果。
**Huggy Dreambooth LoRA**
首先,我们想为 Huggy 微调一个 LoRA 模型,这意味着既要教会模型相应的艺术风格,同时还要教会它特定的角色。在这个例子中,我们制作了一个高质量的 Huggy 吉祥物数据集 (我们使用的是 Chunte-Lee 的艺术作品),该数据集包含 31 张图像及其对应的描述文本。
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 60%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/huggy_dataset_example.png"
></image>
</figure>
配置:
```
--train_batch_size = 1, 2,3, 4
-repeats = 1,2
-learning_rate = 1.0 (Prodigy), 1e-4 (AdamW)
-text_encoder_lr = 1.0 (Prodigy), 3e-4, 5e-5 (AdamW)
-snr_gamma = None, 5.0
-max_train_steps = 1000, 1500, 1800
-text_encoder_training = regular finetuning, pivotal tuning (textual inversion)
```
- 文本编码器全模型微调 VS 枢轴微调 - 我们注意到枢轴微调取得了比文本编码器全模型微调更好的结果,且无需微调文本编码器的权重。
- 最小信噪比 Gamma 加权
- 我们比较了关闭 `snr_gamma` 训得的 [版本 1](https://wandb.ai/linoy/dreambooth-lora-sd-xl/runs/mvox7cqg?workspace=user-linoy) 以及使用 `snr_gamma = 5.0` 训得的 [版本 2](https://wandb.ai/linoy/dreambooth-lora-sd-xl/runs/cws7nfzg?workspace=user-linoy)。
这两个版本都使用了以下参数 (但版本 2 多了一个 `snr_gamma` )
```
--pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \
--pretrained_vae_model_name_or_path="madebyollin/sdxl-vae-fp16-fix" \
--dataset_name="./huggy_clean" \
--instance_prompt="a TOK emoji"\
--validation_prompt="a TOK emoji dressed as Yoda"\
--caption_column="prompt" \
--mixed_precision="bf16" \
--resolution=1024 \
--train_batch_size=4 \
--repeats=1\
--report_to="wandb"\
--gradient_accumulation_steps=1 \
--gradient_checkpointing \
--learning_rate=1e-4 \
--text_encoder_lr=3e-4 \
--optimizer="adamw"\
--train_text_encoder_ti\
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--rank=32 \
--max_train_steps=1000 \
--checkpointing_steps=2000 \
--seed="0" \
```
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 60%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/snr_comparison_huggy_s0s1.png"
></image>
</figure>
- AdamW 及 Prodigy 优化器
- 我们比较了使用 `optimizer=prodigy` 训练的 [版本 1](https://wandb.ai/linoy/dreambooth-lora-sd-xl/runs/uk8d6k6j?workspace=user-linoy) 及使用 `optimizer=adamW` 训练的 [版本 2](https://wandb.ai/linoy/dreambooth-lora-sd-xl/runs/cws7nfzg?workspace=user-linoy)。两个版本都使用枢轴微调进行训练。
- 使用 `optimizer=prodigy` 进行训练时,我们将初始学习率设置为 1。而对 adamW,我们使用了 cog-sdxl 中用于枢轴微调的默认学习率 ( `learning_rate` 为 `1e-4` , `text_encoder_lr` 为 `3e-4` ),因为我们能够使用它们重现出较好的效果。
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 50%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/adamw_prodigy_comparsion_huggy.png"
></image>
</figure>
- 所有其他训练参数和设置均相同。具体如下:
```
--pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \
--pretrained_vae_model_name_or_path="madebyollin/sdxl-vae-fp16-fix" \
--dataset_name="./huggy_clean" \
--instance_prompt="a TOK emoji"\
--validation_prompt="a TOK emoji dressed as Yoda"\
--output_dir="huggy_v11" \
--caption_column="prompt" \
--mixed_precision="bf16" \
--resolution=1024 \
--train_batch_size=4 \
--repeats=1\
--report_to="wandb"\
--gradient_accumulation_steps=1 \
--gradient_checkpointing \
--train_text_encoder_ti\
--lr_scheduler="constant" \
--snr_gamma=5.0 \
--lr_warmup_steps=0 \
--rank=32 \
--max_train_steps=1000 \
--checkpointing_steps=2000 \
--seed="0" \
```
**Y2K 网页设计 LoRA**
我们再探讨另一个例子,这次用另一个数据集,该数据集是我们从互联网上抓取的 27 个 20 世纪 90 年代和 21 世纪初的网页截图 (相当复古🥲):
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 85%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/web_y2k_dataset_preview.png"
></image>
</figure>
配置:
```
–rank = 4,16,32
-optimizer = prodigy, adamW
-repeats = 1,2,3
-learning_rate = 1.0 (Prodigy), 1e-4 (AdamW)
-text_encoder_lr = 1.0 (Prodigy), 3e-4, 5e-5 (AdamW)
-snr_gamma = None, 5.0
-train_batch_size = 1, 2, 3, 4
-max_train_steps = 500, 1000, 1500
-text_encoder_training = regular finetuning, pivotal tuning
```
与前例略有不同,虽然我们使用的训练图像大致相同 (~30 张),但我们注意到,对于这种风格的 LoRA,在 Huggy LoRA 效果很好的设置对于网页风格来说表现出了过拟合。如下:
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 70%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/web_y2k_comparisons.png"
></image>
</figure>
对于 v1,我们选择了训练 Huggy LoRA 时的最佳配置作为起点 - 显然过拟合了。因此我们尝试在下一个版本中通过调整 `--max_train_steps` 、 `--repeats` 、 `--train_batch_size` 以及 `--snr_gamma` 来解决这个问题。更具体地说,我们基于这四个配置迭代了 8 个版本,如下 (所有其余配置保持不变):
| 配置 | v1 | v2 | v3 | v4 | v5 | v6 | v7 | v8 |
|---------------------|-----------|-------------------------------------------|-----------|-----------|-----------|-----------|-----------|-----------|
| `max_train_steps` | 1500 | 1500 | 1500 | 1000 | 1000 | 1000 | 1000 | 1000 |
| `repeats` | 1 | 1 | 2 | 2 | 1 | 1 | 2 | 1 |
| `train_batch_size` | 4 | 4 | 4 | 4 | 2 | 1 | 1 | 1 |
| `instance_data_dir` | `web_y2k` | 14 images randomly samples from `web_y2k` | `web_y2k` | `web_y2k` | `web_y2k` | `web_y2k` | `web_y2k` | `web_y2k` |
| `snr_gamma` | 5.0 | 5.0 | 5.0 | 5.0 | - | - | 5.0 | 5.0 |
我们发现 v4、v5 及 v6 可以达到最佳的折衷效果:
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 70%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/web_y2k_comparisons_close_up.png"
></image>
</figure>
**人脸 LoRA**
在人脸图像上进行训练时,我们的目标是让 LoRA 生成尽可能真实且与目标人物相似的图像,同时又能够很好地泛化至训练集中未见过的背景和构图。本例中,我们聚合了多个 Linoy 脸部数据集 (每个数据集含 6-10 张图像),其中包括一组同时拍摄的特写照片、不同场合的照片集 (不同的背景、灯光和服装) 以及全身照。
我们深知,如果由于照明/分辨率/聚焦上的问题导致图像的质量偏低,此时较少的高质图像比较多的低质图像的微调效果更好 - 少即是多,务必选择最好的照片来训练模型!
配置:
```
rank = 4,16,32, 64
optimizer = prodigy, adamW
repeats = 1,2,3,4
learning_rate = 1.0 , 1e-4
text_encoder_lr = 1.0, 3e-4
snr_gamma = None, 5.0
num_class_images = 100, 150
max_train_steps = 75 * num_images, 100 * num_images, 120 * num_images
text_encoder_training = regular finetuning, pivotal tuning
```
- 先验保留损失
- 与通常的做法相反,我们发现使用生成的类图像会降低与目标人脸的相似性及生成图像的真实性。
- 我们利用从 [unsplash](https://unsplash.com) 下载的开源图像创建了真实肖像的 [数据集](https://huggingface.co/datasets/multimodalart/faces-prior-preservation)。现在,你也可以在我们创建的新 [训练空间](https://huggingface.co/spaces/multimodalart/lora-ease) 中用上它!
- 当使用真实图像数据集时,我们注意到语言漂移更少 (即模型不会将“女人/男人”一词仅与经过训练的面孔相关联,而是可以生成不同的人) ; 同时在输入含经训练的人脸相关词元的提示词情况下,其生成又能保证真实感及整体质量。
- 秩
- 我们比较了秩设为 4、16、32 和 64 等不同值时的 LoRA。在这些实验中,我们发现秩为 64 的 LoRA 生成的图像磨皮效果更强,并且皮肤纹理看上去不太真实。
- 因此,在后面的实验以及 [LoRA ease 空间](https://huggingface.co/spaces/multimodalart/lora-ease) 上,我们都把秩默认设为 32。
- 训练步数
- 尽管仅需很少高质图像 (在我们的例子中为 6) 就能达到好的训练效果,但我们仍需要确定模型训练的理想步数。
- 基于训练图像数,我们尝试了几种不同的乘数: 6 x75 = 450 步 / 6 x100 = 600 步 / 6 x120 = 720 步。
- 如下图所示,初步结果表明,使用 120 倍乘数效果较好 (如果数据集足够多样而不会过拟合,训练集中最好不要有相似的照片)
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 85%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/image_multiplier_comparison_linoy_loras.png"
></image>
</figure>
上图显示了使用 3 个训得的 LoRA (除了 `--max_train_steps` 之外的所有参数都保持相同) 生成的图像,具体配置如下:
```
rank = 32
optimizer = prodigy
repeats = 1
learning_rate = 1.0
text_encoder_lr = 1.0
max_train_steps = 75 * num_images, 100 * num_images, 120 * num_images
train_text_encoder_ti
with_prior_preservation_loss
num_class_images = 150
```
## 推理
使用上述技术训练的模型的推理应该与任何其他训练器训得的模型的推理方式相同,不同之处在于,当我们进行枢轴微调时,除了 LoRA 的 `*.safetensors` 权重之外,还有经过训练的新词元及其文本嵌入模型的 `*.safetensors` 。为了对这些进行推理,我们在加载 LoRA 模型的方式上加了 2 个步骤:
1. 从 Hub 下载经过训练的嵌入模型 (默认文件名为 `{model_name}_emb.safetensors` )
```py
import torch
from huggingface_hub import hf_hub_download
from diffusers import DiffusionPipeline
from safetensors.torch import load_file
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
torch_dtype=torch.float16,
variant="fp16",
).to("cuda")
# download embeddings
embedding_path = hf_hub_download(repo_id="LinoyTsaban/web_y2k_lora", filename="web_y2k_emb.safetensors", repo_type="model")
```
2. 将嵌入模型加载到文本编码器中
```py
# load embeddings to the text encoders
state_dict = load_file(embedding_path)
# notice we load the tokens <s0><s1>, as "TOK" as only a place-holder and training was performed using the new initialized tokens - <s0><s1>
# load embeddings of text_encoder 1 (CLIP ViT-L/14)
pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
# load embeddings of text_encoder 2 (CLIP ViT-G/14)
pipe.load_textual_inversion(state_dict["clip_g"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
```
3. 加载你的 LoRA 并输入提示!
```py
# normal LoRA loading
pipe.load_lora_weights("LinoyTsaban/web_y2k_lora", weight_name="pytorch_lora_weights.safetensors")
prompt="a <s0><s1> webpage about an astronaut riding a horse"
images = pipe(
prompt,
cross_attention_kwargs={"scale": 0.8},
).images
# your output image
images[0]
```
<figure class="image table text-center m-0 w-full">
<image
style="max-width: 50%; margin: auto;"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/web_y2k_astronaut.png"
></image>
</figure>
## Comfy UI / AUTOMATIC1111 推理
最新的脚本完全支持 Comfy UI 和 AUTOMATIC1111 格式的文本逆化模型加载!
**AUTOMATIC1111 / SD.Next**
在 AUTOMATIC1111/SD.Next 中,我们同时加载 LoRA 和文本嵌入模型。
- _LoRA_ : 除了 `diffusers` 格式之外,该脚本还将同时生成 WebUI 格式的 LoRA,其文件名为 `{your_lora_name}.safetensors` 。随后,你可以将其放入 `models/Lora` 目录中。
- _嵌入_ : `diffusers` 和 WebUI 的嵌入文件格式相同。你可以直接下载已训练模型的 `{lora_name}_emb.safetensors` 文件,并将其放入 `embeddings` 目录中。
然后,你可以输入提示 `a y2k_emb webpage about the movie Mean Girls <lora:y2k:0.9>` 来运行推理。你可以直接使用 `y2k_emb` 词元,也可以通过使用 `(y2k_emb:1.2)` 来增加其权重。
**ComfyUI**
在 ComfyUI 中,我们会同时加载 LoRA 和文本嵌入。
- _LoRA_ : 除了 `diffusers` 格式之外,该脚本还将训练 ComfyUI 兼容的 LoRA,其文件名为 `{your_lora_name}.safetensors` 。然后,你可以将其放入 `models/Lora` 目录中。然后,你需要加载 LoRALoader 节点并将其与你的模型和 CLIP 连接起来,详见 [LoRA 加载官方指南](https://comfyanonymous.github.io/ComfyUI_examples/lora/)
- _嵌入_ : diffusers 和 WebUI 的嵌入格式相同。你可以直接下载已训练的模型的 `{lora_name}_emb.safetensors` 文件,并将其放入 `models/embeddings` 目录中,并在你的提示中以 `embedding:y2k_emb` 的方式使用它,详见 [加载嵌入模型的官方指南](https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/)。
### 下一步
🚀 更多功能,即将推出!
我们正在努力为我们的训练脚本添加更多高级功能,以允许用户施加更多的控制力,并支持更灵活的使用。如果你觉得哪些功能对你有帮助,请告诉我们!
🤹 多概念 LoRA
Shah 等人在最近的一项 [工作](https://ziplora.github.io/) 引入了 ZipLoRAs - 一种将多个独立训练的不同风格或主题 LoRA 模型合并起来的方法,以支持以用户指定的任意风格生成用户指定的任意主题图像。[mkshing](https://twitter.com/mk1stats) 实现了此方法的一个开源复现,见 [此处](https://github.com/mkshing/ziplora-pytorch),其使用了我们新改进的 [脚本](https://github.com/mkshing/ziplora-pytorch/blob/main/train_dreambooth_lora_sdxl.py)。 | 4 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/finetune-florence2.md | ---
title: "微调 Florence-2 - 微软的尖端视觉语言模型"
thumbnail: /blog/assets/182_finetune-florence/thumbnail.png
authors:
- user: andito
- user: merve
- user: SkalskiP
guest: true
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# 微调 Florence-2 - 微软的尖端视觉语言模型
Florence-2 是微软于 2024 年 6 月发布的一个基础视觉语言模型。该模型极具吸引力,因为它尺寸很小 (0.2B 及 0.7B) 且在各种计算机视觉和视觉语言任务上表现出色。
Florence 开箱即用支持多种类型的任务,包括: 看图说话、目标检测、OCR 等等。虽然覆盖面很广,但仍有可能你的任务或领域不在此列,也有可能你希望针对自己的任务更好地控制模型输出。此时,你就需要微调了!
本文,我们展示了一个在 DocVQA 上微调 Florence 的示例。尽管原文宣称 Florence 2 支持视觉问答 (VQA) 任务,但最终发布的模型并未包含 VQA 功能。因此,我们正好拿这个任务练练手,看看我们能做点什么!
## 预训练细节与模型架构
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/florence-2.png" alt=" 视觉语言模型结构 " style="width: 90%; height: auto;"><br>
<em>Florence-2 架构</em>
</p>
无论执行什么样的计算机视觉任务,Florence-2 都会将其建模为序列到序列的任务。Florence-2 以图像和文本作为输入,并输出文本。模型结构比较简单: 用 DaViT 视觉编码器将图像转换为视觉嵌入,并用 BERT 将文本提示转换为文本和位置嵌入; 然后,生成的嵌入由标准编码器 - 解码器 transformer 架构进行处理,最终生成文本和位置词元。Florence-2 的优势并非源自其架构,而是源自海量的预训练数据集。作者指出,市面上领先的计算机视觉数据集通常所含信息有限 - WIT 仅有图文对,[SA-1B](https://ai.meta.com/datasets/segment-anything/) 仅有图像及相关分割掩码。因此,他们决定构建一个新的 FLD-5B 数据集,其中的每个图像都包含最广泛的信息 - 目标框、掩码、描述文本及标签。在创建数据集时,很大程度采用了自动化的过程,作者使用现成的专门任务模型,并用一组启发式规则及质检过程来清理所获得的结果。最终生成的用于预训练 Florence-2 模型的新数据集中包含了 1.26 亿张图像、超过 50 亿个标注。
## VQA 上的原始性能
我们尝试了各种方法来微调模型以使其适配 VQA (视觉问答) 任务的响应方式。迄今为止,我们发现最有效方法将其建模为图像区域描述任务,尽管其并不完全等同于 VQA 任务。看图说话任务虽然可以输出图像的描述性信息,但其不允许直接输入问题。
我们还测试了几个“不支持”的提示,例如 “<VQA>”、“<vqa>” 以及 “<Visual question answering>”。不幸的是,这些尝试产生的结果都不可用。
## 微调后在 DocVQA 上的性能
我们使用 DocVQA 数据集的标准指标 [Levenshtein 相似度](https://en.wikipedia.org/wiki/Levenshtein_distance) 来测量性能。微调前,模型在验证集上的输出与标注的相似度为 0,因为模型输出与标注差异不小。对训练集进行 7 个 epoch 的微调后,验证集上的相似度得分提高到了 57.0。
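作为参考,下面给出一个简化的 Levenshtein 相似度计算草图 (纯 Python 实现,不保证与 DocVQA 官方评测脚本完全一致):
```python
# 仅为示意: 简化的 Levenshtein 相似度,不保证与官方评测脚本完全一致。
def levenshtein_distance(a: str, b: str) -> int:
    # 经典动态规划求编辑距离
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(min(prev[j] + 1,                # 删除
                            curr[j - 1] + 1,            # 插入
                            prev[j - 1] + (ca != cb)))  # 替换
        prev = curr
    return prev[-1]

def levenshtein_similarity(pred: str, target: str) -> float:
    if not pred and not target:
        return 1.0
    dist = levenshtein_distance(pred.lower(), target.lower())
    return 1.0 - dist / max(len(pred), len(target))

print(levenshtein_similarity("hugging face", "huggingface"))
```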
我们创建了一个 [🤗 空间](https://huggingface.co/spaces/andito/Florence-2-DocVQA) 以演示微调后的模型。虽然该模型在 DocVQA 上表现良好,但在一般文档理解方面还有改进的空间。但我们仍然认为,它成功地完成了任务,展示了 Florence-2 对下游任务进行微调的潜力。我们建议大家使用 [The Cauldron](https://huggingface.co/datasets/HuggingFaceM4/the_cauldron) 数据集对 Florence-2 进行微调,大家可以在 [我们的 GitHub 页面](https://github.com/andimarafioti/florence2-finetuning) 上找到必要的代码。
下图给出了微调前后的推理结果对比。你还可以至 [此处](https://huggingface.co/spaces/andito/Florence-2-DocVQA) 亲自试用模型。
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/before-after.png" alt=" 微调前后的结果 " style="width: 90%; height: auto;"><br>
<em>微调前后的结果</em>
</p>
## 微调细节
由原文我们可以知道,基础模型在预训练时使用的 batch size 为 2048,大模型在预训练时使用的 batch size 为 3072。另外原文还说: 与冻结图像编码器相比,使用未冻结的图像编码器进行微调能带来性能改进。
我们在低资源的情况下进行了多组实验,以探索模型如何在更受限的条件下进行微调。我们冻结了视觉编码器,并在 [Colab](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing) 上分别使用单张 A100 GPU (batch size 6) 和单张 T4 (batch size 1) 顺利完成微调。
与此同时,我们还对更多资源的情况进行了实验,以 batch size 64 对整个模型进行了微调。在配备 8 张 H100 GPU 的集群上该训练过程花费了 70 分钟。你可以在 [这里](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA) 找到我们训得的模型。
我们都发现 `1e-6` 的小学习率适合上述所有训练情形。如果学习率变大,模型将很快过拟合。
## 遛代码
如果你想复现我们的结果,可以在 [此处](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing) 找到我们的 Colab 微调笔记本。下面,我们遛一遍在 [DocVQA](https://huggingface.co/datasets/HuggingFaceM4/DocumentVQA) 上微调 [Florence-2-base-ft](https://huggingface.co/microsoft/Florence-2-base-ft) 模型。
我们从安装依赖项开始。
```python
!pip install -q datasets flash_attn timm einops
```
接着,从 Hugging Face Hub 加载 DocVQA 数据集。
```python
import torch
from datasets import load_dataset
data = load_dataset("HuggingFaceM4/DocumentVQA")
```
我们可以使用 transformers 库中的 `AutoModelForCausalLM` 和 `AutoProcessor` 类来加载模型和处理器,并设 `trust_remote_code=True` ,因为该模型尚未原生集成到 transformers 中,因此需要使用自定义代码。我们还会冻结视觉编码器,以降低微调成本。
```python
from transformers import AutoModelForCausalLM, AutoProcessor
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AutoModelForCausalLM.from_pretrained(
"microsoft/Florence-2-base-ft",
trust_remote_code=True,
revision='refs/pr/6'
).to(device)
processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base-ft",
trust_remote_code=True, revision='refs/pr/6')
for param in model.vision_tower.parameters():
param.is_trainable = False
```
现在开始微调模型!我们构建一个训练 PyTorch 数据集,并为数据集中的每个问题添加 `\<DocVQA\>` 前缀。
```python
import torch
from torch.utils.data import Dataset
class DocVQADataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
example = self.data[idx]
question = "<DocVQA>" + example['question']
first_answer = example['answers'][0]
image = example['image'].convert("RGB")
return question, first_answer, image
```
接着,构建数据整理器,从数据集样本构建训练 batch,以用于训练。在 40GB 内存的 A100 中,batch size 可设至 6。如果你在 T4 上进行训练,batch size 就只能是 1。
```python
import os
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AdamW, get_scheduler
def collate_fn(batch):
questions, answers, images = zip(*batch)
inputs = processor(text=list(questions), images=list(images), return_tensors="pt", padding=True).to(device)
return inputs, answers
train_dataset = DocVQADataset(data['train'])
val_dataset = DocVQADataset(data['validation'])
batch_size = 6
num_workers = 0
train_loader = DataLoader(train_dataset, batch_size=batch_size,
collate_fn=collate_fn, num_workers=num_workers, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size,
collate_fn=collate_fn, num_workers=num_workers)
```
开始训练模型:
```python
epochs = 7
optimizer = AdamW(model.parameters(), lr=1e-6)
num_training_steps = epochs * len(train_loader)
lr_scheduler = get_scheduler(name="linear", optimizer=optimizer,
num_warmup_steps=0, num_training_steps=num_training_steps,)
for epoch in range(epochs):
model.train()
train_loss = 0
i = -1
for inputs, answers in tqdm(train_loader, desc=f"Training Epoch {epoch + 1}/{epochs}"):
i += 1
input_ids = inputs["input_ids"]
pixel_values = inputs["pixel_values"]
labels = processor.tokenizer(text=answers, return_tensors="pt", padding=True, return_token_type_ids=False).input_ids.to(device)
outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=labels)
loss = outputs.loss
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
train_loss += loss.item()
avg_train_loss = train_loss / len(train_loader)
print(f"Average Training Loss: {avg_train_loss}")
model.eval()
val_loss = 0
with torch.no_grad():
for batch in tqdm(val_loader, desc=f"Validation Epoch {epoch + 1}/{epochs}"):
inputs, answers = batch
input_ids = inputs["input_ids"]
pixel_values = inputs["pixel_values"]
labels = processor.tokenizer(text=answers, return_tensors="pt", padding=True, return_token_type_ids=False).input_ids.to(device)
outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=labels)
loss = outputs.loss
val_loss += loss.item()
print(val_loss / len(val_loader))
```
你可以分别对模型和处理器调用 `save_pretrained()` 以保存它们。微调后的模型在 [此处](https://huggingface.co/HuggingFaceM4/Florence-2-DocVQA),你还可以在 [此处](https://huggingface.co/spaces/andito/Florence-2-DocVQA) 找到其演示。
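保存这一步大致如下 (其中 `florence2-docvqa-ft` 只是一个假设的输出目录/仓库名):
```python
# 仅为示意: 保存并 (可选地) 上传微调后的模型与处理器;目录名为演示用的假设。
model.save_pretrained("florence2-docvqa-ft")
processor.save_pretrained("florence2-docvqa-ft")

# 如需上传到 Hugging Face Hub (需先执行 huggingface-cli login):
# model.push_to_hub("your-username/florence2-docvqa-ft")
# processor.push_to_hub("your-username/florence2-docvqa-ft")
```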
<script
type="module"
src="https://gradio.s3-us-west-2.amazonaws.com/4.36.1/gradio.js"></script>
<gradio-app theme_mode="light" src="https://andito-Florence-2-DocVQA.hf.space"></gradio-app>
## 总结
本文中,我们展示了如何高效地针对自定义数据集微调 Florence-2,以在短时间内在全新任务上取得令人眼前一亮的性能。对于那些希望在设备上或在生产环境中经济高效地部署小模型的人来说,该做法特别有价值。我们鼓励开源社区利用这个微调教程,探索 Florence-2 在各种新任务中的巨大潜力!我们迫不及待地想在 🤗 Hub 上看到你的模型!
## 有用资源
- [视觉语言模型详解](https://huggingface.co/blog/zh/vlms)
- [微调 Colab](https://colab.research.google.com/drive/1hKDrJ5AH_o7I95PtZ9__VlCTNAo1Gjpf?usp=sharing)
- [微调 Github 代码库](https://github.com/andimarafioti/florence2-finetuning)
- [Florence-2 推理 Notebook](https://huggingface.co/microsoft/Florence-2-large/blob/main/sample_inference.ipynb)
- [Florence-2 DocVQA 演示](https://huggingface.co/spaces/andito/Florence-2-DocVQA)
- [Florence-2 演示](https://huggingface.co/spaces/gokaygo)
我们感谢 Pedro Cuenca 对本文的审阅。 | 5 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/stackllama.md | ---
title: "“StackLLaMA”: 用 RLHF 训练 LLaMA 的手把手教程"
thumbnail: /blog/assets/138_stackllama/thumbnail.png
authors:
- user: edbeeching
- user: kashif
- user: ybelkada
- user: lewtun
- user: lvwerra
- user: nazneen
- user: natolambert
translators:
- user: Vermillion-Qi
- user: zhongdongy
---
# “StackLLaMA”: 用 RLHF 训练 LLaMA 的手把手教程
[ChatGPT](https://openai.com/blog/chatgpt)、[GPT-4](https://openai.com/research/gpt-4)、[Claude](https://www.anthropic.com/index/introducing-claude) 等语言模型之所以强大,是因为它们采用了 **基于人类反馈的强化学习** (Reinforcement Learning from Human Feedback,RLHF) 来使其更符合我们的使用场景。
本博客旨在展示用 RLHF 训练一个 [LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai) 模型,以回答 [Stack Exchange](https://stackexchange.com/) 上的问题。具体而言,包含以下几个方面:
- 有监督的微调 (Supervised Fine-tuning,SFT)。
- 奖励 / 偏好建模 (Reward / preference modeling,RM)。
- 基于人类反馈的强化学习 (RLHF)。

摘自 InstructGPT 论文,Ouyang, Long, et al. “Training language models to follow instructions with human feedback.” arXiv preprint arXiv:2203.02155 (2022).
结合了上述方法,我们发布了 StackLLaMA 模型,该模型在 [🤗 Hub](https://huggingface.co/trl-lib/llama-se-rl-peft) 上开源 (访问链接查看 [Meta 的原始 LLaMA](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) ),整个 [训练的流程](https://huggingface.co/docs/trl/index) 已经集成到了 Hugging Face TRL 库中 。你可以通过下面的 [demo](https://huggingface.co/spaces/trl-lib/stack-llama) 来尝试该模型。
## LLaMA 模型
在实践 RLHF 时,选取一个合适的模型很重要: RLHF 只是一个让模型满足我们交互形式的需求的微调过程 。所以我们选取了最近上线的 [LLaMA](https://arxiv.org/abs/2302.13971) 模型。LLaMA 模型是 Meta AI 最近推出的大语言模型。其参数量大小涵盖 7B 到 65B,以及训练在 1T 和 1.4T 的 token 上,这让其很实用。我们这里采用 7B 的模型。(请填写 Meta AI 的这份 [表单](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) 来下载模型)。
## Stack Exchange 数据集
收集人类的反馈数据集是很复杂且昂贵的劳动。为了做到这个,并且还能保证模型的有效性,我们使用 [StackExchange 数据集](https://huggingface.co/datasets/HuggingFaceH4/stack-exchange-preferences)。该数据集涵盖了 StackExchange 平台上的问题和答案 (包含 StackOverflow 的编程等话题下的)。这很适合我们的实践,因为其包含了每个答案的赞和踩的数量。
我们按照 [Askell et al. 2021](https://arxiv.org/abs/2112.00861) 中的方法,给每个答案赋分:
```
score = log2 (1 + upvotes) rounded to the nearest integer, plus 1 if the questioner accepted the answer (we assign a score of −1 if the number of upvotes is negative).
```
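该打分规则可以直接写成几行代码,下面是一个简单的示意 (函数名为说明方便而取):
```python
import math

def answer_score(upvotes: int, accepted: bool) -> int:
    """按上文规则为单个答案打分的示意实现。"""
    if upvotes < 0:
        return -1
    score = round(math.log2(1 + upvotes))
    if accepted:
        score += 1
    return score

print(answer_score(upvotes=15, accepted=True))  # round(log2(16)) + 1 = 5
```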
对奖励模型,我们将看到每个问题总是需要对比两个答案。有些问题有很多答案,可以产生很多对,我们每个问题最多取十对以限制数据量。最后,我们把格式从 HTML 转换为 Markdown 以提高输出的可读性。你可以在 [这里](https://huggingface.co/datasets/lvwerra/stack-exchange-paired) 找到处理好的数据集及其处理过程。
## 高效训练策略
即使是最小 LLaMA 模型的训练,都需要大量内存。估算一下: 以 bf16 半精度,每个参数占用 2 个字节 (fp32 精度下为 4 字节),此外训练时每个参数还需要额外的 8 个字节 (例如用于 Adam 优化器的状态,参见 Transformers 的 [性能文档](https://huggingface.co/docs/transformers/perf_train_gpu_one#optimizer))。可见 7B 参数量的模型将占用 (2+8)* 7B = 70 GB 的内存,并且还可能需要更多内存来存储诸如注意力分数的中间值。所以很难在一张 80GB 显存的 A100 上训练。或许你可以使用一些技巧,比如用更高效的半精度优化器来压缩内存,但内存溢出是迟早的。
另外的可能是 **参数高效的微调**(Parameter-Efficient Fine-Tuning, PEFT) 技术,比如 [`peft`](https://github.com/huggingface/peft) 库,它可以对使用 8-bit 加载的模型做 **低秩优化**(Low-Rank Adaptation,LoRA)。

线性层的低秩优化: 额外参数 (橙色) 被加在 Frozen 层 (蓝色),编码后的隐藏状态与 Frozen 层的隐藏状态叠加在一起。
以 8bit 加载模型会大幅降低内存占用,因为每个参数只要一字节 (比如 7B LLaMA 是 7GB 内存)。与直接训练原始模型不同,LoRA 在特定层 (一般是注意力层) 添加少量新参数,大幅降低了需要训练的参数。
根据经验,一个衡量标准是: 每 1B 参数在整个微调过程中大约占用 1.2-1.4GB 内存 (和具体 batch size 及序列长度有关)。正如上面提到的博客中所讨论的,这使得低成本微调较大参数规模的模型成为可能 (比如在一张 A100 上微调 50-60B 参数的模型)。
这些技术能让微调大模型的任务,在消费级设备和 Google Colab 上执行。这里提供一些值得关注的演示 demo: `facebook/opt-6.7b` (在 float16 精度下 13GB) 和 `openai/whisper-large`
跑在 Google Colab (15GB 显存) 上。欲了解 `peft` 的使用,请参见 [github 仓库](https://github.com/huggingface/peft) 或者之前的 [博客介绍](https://huggingface.co/blog/trl-peft): 在客户端训练 20B 参数量的模型。
现在我们能在一张 GPU 上微调很大的模型了,但训练还是会很慢。此时最简单的策略便是并行化: 把一个训练同时放到不同的 GPU 上,各 GPU 接受不同的 batch。这样我们可以并行执行前向传播和后向传播,通过增加 GPU 的数量实现并行能力提升。

我们可以选用 `transformers.Trainer` 或 `accelerate`,因为它们都支持无需修改代码即可实现数据并行化。只需注意调用 `torchrun` 或者 `accelerate launch` 脚本时的参数即可。比如以下就是在一台 8 显卡的机器上分别用 `accelerate launch` 和 `torchrun` 启动的方法:
```bash
accelerate launch --multi_gpu --num_machines 1 --num_processes 8 my_accelerate_script.py
torchrun --nnodes 1 --nproc_per_node 8 my_torch_script.py
```
## 有监督的微调
在训练奖励模型和用 RL 之前,如果模型已经在我们感兴趣的方面表现良好,将会很有帮助。在我们的示例中,我们想要它能回答问题;在其他场景下,我们可能希望它能听从指令 (这时就需要做指令微调)。实现这个最简单的方法便是面向该语言任务,用该任务和领域的文本继续训练。[StackExchange 数据集](https://huggingface.co/datasets/HuggingFaceH4/stack-exchange-preferences) 含有超过 1000 万条指令,所以我们能用其子集很容易地训练。
在用 RLHF 之前的模型微调没有什么特别的,就是一般的面向语言任务的预训练模型微调。为了高效利用数据,我们采用了称之为 **打包** 的技术: 与其让 batch 中的每个样本均由单一文本组成、再按最长的文本做 padding (填充),不如把很多文本拼接起来,用 EOS token 隔开,然后切成固定长度的 chunk (切块) 来组成 batch,从而完全避免 padding。

该方法大大提高了训练效率,因为模型输入的每个 token 都参与 loss 的计算,而不会有 padding token 被掩码丢弃。如果你没有足够数据,并且担心随意截断会丢失部分上下文语义,你也可以使用传统的数据加载器。
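为了更直观地理解打包,下面给出一个极简示意 (仅用于说明思路,分词器与文本均为假设的示例;实际训练中这一步由下文的 `ConstantLengthDataset` 完成):
```python
from transformers import AutoTokenizer

# 示意:把多段文本用 EOS 连接,再切成定长 chunk,从而完全避免 padding
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # 仅作演示用的分词器
texts = ["Question: ...\nAnswer: ...", "Question: ...\nAnswer: ..."]  # 假设的样本

all_ids = []
for t in texts:
    all_ids.extend(tokenizer(t)["input_ids"] + [tokenizer.eos_token_id])

seq_length = 1024
chunks = [
    all_ids[i : i + seq_length]
    for i in range(0, len(all_ids) - seq_length + 1, seq_length)
]  # 每个 chunk 都是满长度的训练样本
```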
`ConstantLengthDataset` 实现了上述 **打包** 技术,这样我们就能在用 `peft` 加载模型后直接使用 `Trainer`。首先,我们以 `int8` 精度加载模型并做好训练准备,然后加入 LoRA 适配器。
```python
# load model in 8bit
model = AutoModelForCausalLM.from_pretrained(
args.model_path,
load_in_8bit=True,
device_map={"": Accelerator().local_process_index}
)
model = prepare_model_for_int8_training(model)
# add LoRA to model
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
```
我们根据相应的语言任务,对模型训练几千个 step (步),并保存模型。由于我们将会有其他微调模型的目的,我们将 LoRA 的微调器权重合并到原模型中。
**声明**: 因为 LLaMA 的许可证规定,我们只能发布微调器的权重,你需要填 Meta AI 的 [表格](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) 来获取模型,然后用这个 [脚本](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py) 来转成 🤗 Transformers 格式。注意 🤗 Transformers 应该从源码安装,或者 `v4.28` 版。
现在我们已经微调好了模型,可以训练奖励模型了。
## 奖励模型和人类偏好
原则上,我们可以直接用人类标注对模型做 RLHF 微调。然而,这需要我们在每轮优化后把一些样本发给人类打分。这样做既昂贵又缓慢,因为收敛需要大量训练样本,而人类阅读和标注的速度有限。
一个比直接反馈更好的策略是,在进入 RL 循环之前用人类标注集来训练一个奖励模型。奖励模型的目的是模拟人类对文本的打分。构建奖励模型有许多能用的策略: 最直接的便是预测标注 (比如根据好与坏,输出比分或者布尔值)。最佳实践是,预测结果的排序,即对每个 prompt (输入文本) 对应的两个结果 $(y_k, y_j)$,模型预测人类标注的比分哪个更高。
或者表示为 loss (损失) 函数:
$$
\mbox{loss}(\theta) = - E_{(x, y_j, y_k) \sim D} [ \mbox{log}( \sigma( r_\theta (x, y_j) - r_\theta(x, y_k)) ) ]
$$
其中 $r_\theta$ 是奖励模型的打分,$y_j$ 是一对答案中人类更偏好的那个。
在 StackExchange 数据集上,我们能得到两个答案的受欢迎程度。有了这个信息和上面的损失函数,我们就能自定义 loss 来改 `transformers.Trainer` 了。
```python
class RewardTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False):
rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0]
rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0]
loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
if return_outputs:
return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
return loss
```
我们用数据集中的 100000 对样本训练,并在 50000 对上评估。在较小的 batch size (4) 下,我们用 `peft` 的 LoRA 适配器、以 BF16 精度和 Adam 优化器来训练 LLaMA 模型。我们的 LoRA 设置是:
```python
peft_config = LoraConfig(
task_type=TaskType.SEQ_CLS,
inference_mode=False,
r=8,
lora_alpha=32,
lora_dropout=0.1,
)
```
训练用 [Weights & Biases](https://wandb.ai/krasul/huggingface/runs/wmd8rvq6?workspace=user-krasul) 来记录日志,在 🤗 训练集群上用 8 卡 A100 训练了数小时,最后模型的准确率为 **67%**。尽管这个数字看上去不高,但想想这个任务本身的难度。
如下文所述,训练好的奖励模型将作为参数固定的模型,供下游的 RL 阶段使用。
## 基于人类反馈的强化学习
现在我们手头有了微调的语言模型和奖励模型,可以开始执行 RL 循环了: 这个过程大致分为三步
1. 生成对 prompt (输入文本) 的反馈。
2. 用奖励模型来对反馈评分。
3. 对评分,进行一轮策略优化的强化学习。

在被 token 化并输入奖励模型前,提问和回答的 prompt 模版如下:
```
Question: <Query>
Answer: <Response>
```
在有监督训练 (SFT),奖励模型训练 (RM) 和 RLHF 的阶段都用此模版。
用 RL 训练语言模型有一个常见问题: 模型可能学会胡说八道来糊弄奖励模型,使其给出高分。为了加以约束,我们在奖励上增加一个惩罚项: 保留一份未经 RL 训练的参考模型,将当前模型与参考模型输出之间的 KL 散度作为惩罚项从奖励中扣除
$$
\mbox{R}(x, y) = \mbox{r}(x, y) - \beta \mbox{KL}(x,y)
$$
其中 $r$ 是奖励模型的打分,$\mbox{KL}(x,y)$ 是当前模型和参考模型输出分布之间的 KL 散度。
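为了帮助理解,下面是按上式把奖励与 KL 惩罚组合起来的一个极简示意 (其中的对数概率与数值均为假设,实际计算由 `trl` 的 PPOTrainer 在内部完成):
```python
import torch

# 假设的逐 token 对数概率:当前策略模型 vs. 参考 (SFT) 模型
logprobs_policy = torch.tensor([-1.2, -0.8, -2.1])
logprobs_ref = torch.tensor([-1.0, -0.9, -2.0])
reward_model_score = torch.tensor(0.7)  # 奖励模型对整段回答的打分 (假设值)
beta = 0.2  # KL 惩罚系数

kl_estimate = (logprobs_policy - logprobs_ref).sum()  # log(pi_RL / pi_SFT) 的和
penalized_reward = reward_model_score - beta * kl_estimate
print(penalized_reward)
```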
再提一遍,我们用 `peft` 来实现内存高效的训练,这在 RLHF 阶段还带来额外的好处: 参考模型和训练模型共享同一个基底模型,也就是有监督微调 (SFT) 的结果,它以 8-bit 加载并且在训练中自始至终保持固定。我们仅用 PPO 优化最终模型的 LoRA 权重,而基底模型的权重始终共享。
```python
for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
question_tensors = batch["input_ids"]
# sample from the policy and generate responses
response_tensors = ppo_trainer.generate(
question_tensors,
return_prompt=False,
length_sampler=output_length_sampler,
**generation_kwargs,
)
batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)
# Compute sentiment score
texts = [q + r for q, r in zip(batch["query"], batch["response"])]
pipe_outputs = sentiment_pipe(texts, **sent_kwargs)
rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs]
# Run PPO step
stats = ppo_trainer.step(question_tensors, response_tensors, rewards)
# Log stats to WandB
ppo_trainer.log_stats(stats, batch, rewards)
```
我们用 🤗 训练集群,在 3x8 块 A100-80GB 上训练了 20 小时;用更少的硬件也能较快得到相近的结果 (例如在 8 块 A100-80GB 上训练约 20 小时)。所有的训练记录都可以在 [Weights & Biases](https://wandb.ai/lvwerra/trl/runs/ie2h4q8p) 上找到。

训练过程中每个 batch 的奖励随训练步数的变化,模型在约 1000 步时效果最好。
所以模型训好了能干啥嘞 ? 我们拭目以待 !

尽管至少在目前,我们还不该太相信这样的结果,但模型的回答已经很不错了,甚至还附上了 Google 链接。接下来我们来看看训练时遇到的挑战。
## 挑战,不稳定和突破口
用 RL 训练 LLM (Large Language Models,大语言模型) 不总是一帆风顺的,你看到的本文也是经历了无数实验、无数失败和无数次调参才得到的。即便如此,该模型也不能说表现完美。这里,我们分享一些训练中的观察和遇到的问题。
### 奖励更高代表更好表现 ?

天呐,这个实验肯定表现很好 ! 看奖励的曲线多甜啊 !
在 RL 中,一般而言,奖励越高越好。在 RLHF 中,我们用了一个并不完美的奖励模型,这就给了 PPO 算法钻空子的机会。这可能表现为奖励突然上升,然而检查生成的文本时,却发现其中充斥着 “```” 字符,因为奖励模型发现 Stack Exchange 上含有代码块的答案往往得分更高,于是更偏爱它们。幸运的是,该问题出现得很少,KL 散度惩罚项应该在这里起到了作用。
### KL 散度总是正的?
如我们前面所提到的,KL 惩罚项被用来保证训练后的分布和原始分布接近。一般地,KL 散度度量两个分布之间的差异,并且总是非负的。然而,在 `trl` 中我们用的是 KL 的一个估计量,其期望值与真实的 KL 散度相同。
$$
KL_{pen}(x, y) = \mbox{log} \left( \pi_\phi^{\mbox{RL}}(y | x) / \pi^{\mbox{SFT}}(y|x) \right)
$$
显然,当某个 token 在当前模型中的概率低于原始模型时,KL 的这一项估计就是负的;只要采样充分,取平均后总体仍是正的。但是一些生成策略会强制生成或抑制某些 token。比如,按 batch 生成时,已完成的序列会被 padding;设置最小生成长度时,EOS token 会被抑制。模型对这些被强制或被抑制的 token 可能给出非常高或非常低的概率,从而导致 KL 估计为负。由于 PPO 算法是面向奖励优化的,模型就会去追逐这些负的惩罚项,导致训练不稳定。

对生成和采样策略,你需要特别小心。我们建议一开始使用最简单的生成方式,之后再逐渐增加复杂度。
### 仍然存在的问题
仍然有很多问题我们尚不理解,比如下图中 loss 会间歇性地跳跃,并导致之后训练的不稳定。

一旦我们解决了这些问题,我们就会上传变化到 `trl` 上,以保证社区受益。
## 总结
在本博客,我们走过了 RLHF 训练的整个流程,从准备人类标注的数据集开始,调整语言模型到特定领域,训练奖励模型,并最终用 RL 训练一个模型。
通过使用 `peft`,任何人都能在一张 GPU 上跑我们的实验 ! 如果训练慢了,可以用数据并行化的方法,不需要改任何代码,或者用多张 GPU 并行提高训练速度。
对实际应用,这仅仅是第一步!一旦你有了模型,你就需要将它和其他模型比较优劣。这可以通过构建一个对不同模型输出进行排名的数据集来实现,和我们训练奖励模型所用的数据集类似。
一旦你加入了评估的步骤,好玩的就开始了: 你可以在原数据集上反复炼丹,也可以扩充数据集或者对原数据集提纯。另外,你还可以针对奖励模型和生成模型尝试不同大小和结构的模型,这需要时间和持续的实验。
我们在积极提高 TRL 以保证 RLHF 的每一步都可见,并且十分激动能看到人们用它来构建的东西。如果你想有所贡献,欢迎看我们的 [Github Issue](https://github.com/lvwerra/trl/issues)。
## 引用
```
@misc {beeching2023stackllama,
author = { Edward Beeching and
Younes Belkada and
Kashif Rasul and
Lewis Tunstall and
Leandro von Werra and
Nazneen Rajani and
Nathan Lambert
},
title = { StackLLaMA: An RL Fine-tuned LLaMA Model for Stack Exchange Question and Answering },
year = 2023,
url = { https://huggingface.co/blog/stackllama },
doi = { 10.57967/hf/0513 },
publisher = { Hugging Face Blog }
}
```
## 感谢
我们感谢 Philipp Schmid 分享了他对文本生成绝妙的 [demo](https://huggingface.co/spaces/philschmid/igel-playground), 我们的 demo 也是基于他的。我们也感谢 Omar Sanseviero 和 Louis Castricato 对我们博客的草稿提供宝贵详尽的反馈。
| 6 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/textgen-pipe-gaudi.md | ---
title: "基于英特尔® Gaudi® 2 AI 加速器的文本生成流水线"
thumbnail: /blog/assets/textgen-pipe-gaudi/thumbnail.png
authors:
- user: siddjags
guest: true
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# 基于英特尔® Gaudi® 2 AI 加速器的文本生成流水线
随着生成式人工智能 (Generative AI,GenAI) 革命的全面推进,使用 Llama 2 等开源 transformer 模型生成文本已成为新风尚。人工智能爱好者及开发人员正在寻求利用此类模型的生成能力来赋能不同的场景及应用。本文展示了如何基于 Optimum Habana 以及我们实现的流水线类轻松使用 Llama 2 系列模型 (7b、13b 及 70b) 生成文本 - 仅需几行代码,即可运行!
我们设计并实现了一个流水线类,旨在为用户提供极大的灵活性和易用性。它提供了高层级的抽象以支持包含预处理和后处理在内的端到端文本生成。同时,用户也可以通过多种方法使用该流水线类 - 你可以在 Optimum Habana 代码库中直接运行 `run_pipeline.py` 脚本,也可以在你自己的 python 脚本中调用该流水线类,还可以用该流水线类来初始化 LangChain。
## 准备工作
由于 Llama 2 模型实行的是许可式访问,因此如果你尚未申请访问权限,需要首先申请访问权限。方法如下: 首先,访问 [Meta 网站](https://ai.meta.com/resources/models-and-libraries/llama-downloads) 并接受相应条款。一旦 Meta 授予你访问权限 (可能需要一两天),你需要使用你当时使用的电子邮箱地址申请 [Hugging Face Llama 2 模型库](https://huggingface.co/meta-llama/Llama-2-7b-hf) 的访问权限。
获取访问权限后,可通过运行以下命令登录你的 Hugging Face 帐户 (此时会需要一个访问令牌,你可从 [你的用户个人资料页面](https://huggingface.co/settings/tokens) 上获取):
```bash
huggingface-cli login
```
你还需要安装最新版本的 Optimum Habana 并拉取其代码库以获取后续要使用的脚本。命令如下:
```bash
pip install optimum-habana==1.10.4
git clone -b v1.10-release https://github.com/huggingface/optimum-habana.git
```
如果想运行分布式推理,还需要根据你的 SynapseAI 版本安装对应的 DeepSpeed。在本例中,我使用的是 SynapseAI 1.14.0。
```bash
pip install git+https://github.com/HabanaAI/[email protected]
```
至此,准备完毕!
## 方法一: 通过命令直接使用流水线脚本
首先,使用如下命令进入 `optimum-habana` 的相应目录,然后按照 `README` 中的说明更新 `PYTHONPATH` 。
```bash
cd optimum-habana/examples/text-generation
pip install -r requirements.txt
cd text-generation-pipeline
```
如果你想用自己的提示生成文本序列,下面给出了一个示例:
```bash
python run_pipeline.py --model_name_or_path meta-llama/Llama-2-7b-hf --use_hpu_graphs --use_kv_cache --max_new_tokens 100 --do_sample --prompt "Here is my prompt"
```
你还可以传入多个提示作为输入,并更改生成的温度或 `top_p` 值,如下所示:
```bash
python run_pipeline.py --model_name_or_path meta-llama/Llama-2-13b-hf --use_hpu_graphs --use_kv_cache --max_new_tokens 100 --do_sample --temperature 0.5 --top_p 0.95 --prompt "Hello world" "How are you?"
```
如果想用 Llama-2-70b 等大尺寸模型生成文本,下面给出了一个用 DeepSpeed 启动流水线的示例命令:
```bash
python ../../gaudi_spawn.py --use_deepspeed --world_size 8 run_pipeline.py --model_name_or_path meta-llama/Llama-2-70b-hf --max_new_tokens 100 --bf16 --use_hpu_graphs --use_kv_cache --do_sample --temperature 0.5 --top_p 0.95 --prompt "Hello world" "How are you?" "Here is my prompt" "Once upon a time"
```
## 方法二: 在自己的 Python 脚本中调用流水线类
你还可以在自己的 Python 脚本中调用我们实现的流水线类,如下例所示。你需要在 `optimum-habana/examples/text-generation/text-generation-pipeline` 目录下运行该示例脚本 [译者注: 原因是 `GaudiTextGenerationPipeline` 这个类的定义在该目录的 `pipeline.py` 中]。
```python
import argparse
import logging
from pipeline import GaudiTextGenerationPipeline
from run_generation import setup_parser
# Define a logger
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
# Set up an argument parser
parser = argparse.ArgumentParser()
args = setup_parser(parser)
# Define some pipeline arguments. Note that --model_name_or_path is a required argument for this script
args.num_return_sequences = 1
args.model_name_or_path = "meta-llama/Llama-2-7b-hf"
args.max_new_tokens = 100
args.use_hpu_graphs = True
args.use_kv_cache = True
args.do_sample = True
# Initialize the pipeline
pipe = GaudiTextGenerationPipeline(args, logger)
# You can provide input prompts as strings
prompts = ["He is working on", "Once upon a time", "Far far away"]
# Generate text with pipeline
for prompt in prompts:
print(f"Prompt: {prompt}")
output = pipe(prompt)
print(f"Generated Text: {repr(output)}")
```
> 你需要用 `python <name_of_script>.py --model_name_or_path a_model_name` 命令来运行上述脚本,其中 `--model_name_or_path` 是必需的参数。当然,你也可以在代码中直接更改模型名称 (如上述 Python 代码片段所示)。
上述代码段表明我们实现的流水线类 `GaudiTextGenerationPipeline` 会对输入字符串执行生成文本所需的全部操作,包括数据预处理及后处理在内。
## 方法三: 在 LangChain 中使用流水线类
如果在构造时传入 `use_with_langchain` 参数的话,我们的文本生成流水线还可以作为 LangChain 的兼容组件使用。首先,按照如下方式安装 LangChain:
```bash
pip install langchain==0.0.191
```
下面给出了一个如何在 LangChain 中使用我们的流水线类的代码示例。
```python
import argparse
import logging
from langchain.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from pipeline import GaudiTextGenerationPipeline
from run_generation import setup_parser
# Define a logger
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
# Set up an argument parser
parser = argparse.ArgumentParser()
args = setup_parser(parser)
# Define some pipeline arguments. Note that --model_name_or_path is a required argument for this script
args.num_return_sequences = 1
args.model_name_or_path = "meta-llama/Llama-2-13b-chat-hf"
args.max_input_tokens = 2048
args.max_new_tokens = 1000
args.use_hpu_graphs = True
args.use_kv_cache = True
args.do_sample = True
args.temperature = 0.2
args.top_p = 0.95
# Initialize the pipeline
pipe = GaudiTextGenerationPipeline(args, logger, use_with_langchain=True)
# Create LangChain object
llm = HuggingFacePipeline(pipeline=pipe)
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer,\
just say that you don't know, don't try to make up an answer.
Context: Large Language Models (LLMs) are the latest models used in NLP.
Their superior performance over smaller models has made them incredibly
useful for developers building NLP enabled applications. These models
can be accessed via Hugging Face's `transformers` library, via OpenAI
using the `openai` library, and via Cohere using the `cohere` library.
Question: {question}
Answer: """
prompt = PromptTemplate(input_variables=["question"], template=template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
# Use LangChain object
question = "Which libraries and model providers offer LLMs?"
response = llm_chain(prompt.format(question=question))
print(f"Question 1: {question}")
print(f"Response 1: {response['text']}")
question = "What is the provided context about?"
response = llm_chain(prompt.format(question=question))
print(f"\nQuestion 2: {question}")
print(f"Response 2: {response['text']}")
```
> 该流水线类当前仅在 LangChain 0.0.191 版上验证通过,其他版本可能不兼容。
## 总结
我们在英特尔® Gaudi® 2 AI 加速器上实现了一个自定义的文本生成流水线,其可接受单个或多个提示作为输入。该流水线类灵活支持各种模型尺寸及各种影响文本生成质量的参数。此外,不管是直接使用还是插入你自己的脚本,它都非常简单,并且与 LangChain 兼容。
> 使用预训练模型需遵守第三方许可,如 “Llama 2 社区许可协议”(LLAMAV2)。有关 LLAMA2 模型的预期用途有哪些、哪些行为会被视为滥用或超范围使用、预期使用者是谁以及其他条款,请仔细阅读此 [链接](https://ai.meta.com/llama/license/) 中的说明。用户需自主承担遵守任何第三方许可的责任和义务,Habana Labs 不承担任何与用户使用或遵守第三方许可相关的责任。为了能够运行像 `Llama-2-70b-hf` 这样的受限模型,你需要:
>
> - 有一个 Hugging Face 帐户
> - 同意 HF Hub 上模型卡中的模型使用条款
> - 设好访问令牌
> - 使用 HF CLI 登录你的帐户,即在启动脚本之前运行 `huggingface-cli login` | 7 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/quanto-introduction.md | ---
title: "Quanto:PyTorch 量化工具包"
thumbnail: /blog/assets/169_quanto_intro/thumbnail.png
authors:
- user: dacorvo
- user: ybelkada
- user: marcsun13
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# Quanto: PyTorch 量化工具包
量化技术通过用低精度数据类型 (如 8 位整型 (int8) ) 来表示深度学习模型的权重和激活,以减少传统深度学习模型使用 32 位浮点 (float32) 表示权重和激活所带来的计算和内存开销。
减少位宽意味着模型的内存占用更低,这对在消费设备上部署大语言模型至关重要。量化技术也使得我们可以针对较低位宽数据类型进行特殊的计算优化,例如 CUDA 设备有针对 `int8` 或 `float8` 矩阵乘法的硬件优化。
市面上有许多可用于量化 PyTorch 深度学习模型的开源库,它们各有特色及局限。通常来讲,每个库都仅实现了针对特定模型或设备的特性,因而普适性不强。此外,尽管各个库的设计原理大致相同,但不幸的是,它们彼此之间却互不兼容。
因此,[quanto](https://github.com/huggingface/quanto) 库应运而出,其旨在提供一个多功能的 PyTorch 量化工具包。目前 quanto 包含如下特性:
- 在 eager 模式下可用 (适用于无法成图的模型),
- 生成的量化模型可以运行于任何设备 (包括 CUDA 设备和 MPS 设备) 上,
- 自动插入量化和反量化结点,
- 自动插入量化后的 `torch.nn.functional` 算子,
- 自动插入量化后的 `torch.nn` 模块 (具体支持列表见下文),
- 提供无缝的模型量化工作流,支持包含静态量化、动态量化在内的多种模型量化方案,
- 支持将量化模型序列化为 `state_dict` ,
- 不仅支持 `int8` 权重,还支持 `int2` 以及 `int4` ,
- 不仅支持 `int8` 激活,还支持 `float8` 。
最近,出现了很多仅专注于大语言模型 (LLM) 的量化算法,而 [quanto](https://github.com/huggingface/quanto) 的目标为那些适用于任何模态的、易用的量化方案 (如线性量化,分组量化等) 提供简单易用的量化原语。
我们无意取代其他量化库,而是想通过新算法的实现门槛来促进创新,使得大家能够轻松地实现新模块,抑或是轻松组合现有模块来实现新算法。
毫无疑问,量化很困难。当前,如要实现模型的无缝量化,需要大家对 PyTorch 的内部结构有深入了解。但不用担心,[quanto](https://github.com/huggingface/quanto) 的目标就是为你完成大部分繁重的工作,以便你可以集中精力在最重要的事情上,即: 探索低比特 AI 从而找出惠及 GPU 穷人的解决方案。
## 量化工作流
大家可以 pip 安装 `quanto` 包。
```sh
pip install quanto
```
[quanto](https://github.com/huggingface/quanto) 没有对动态和静态量化进行明确区分,因为静态量化可以通过先对模型进行动态量化、随后再将权重 `冻结` 为静态值的方式来完成。
典型的量化工作流包括以下步骤:
**1. 量化**
将标准浮点模型转换为动态量化模型。
```python
quantize(model, weights=quanto.qint8, activations=quanto.qint8)
```
此时,我们会对模型的浮点权重进行动态量化以用于后续推理。
**2. 校准 (如果上一步未量化激活,则可选)**
`quanto` 支持校准模式。在校准过程中,我们会给量化模型传一些代表性样本,并在此过程中记录各算子激活的统计信息 (如取值范围)。
```python
with calibration(momentum=0.9):
model(samples)
```
上述代码会自动使能量化模块的激活量化功能。
**3. 微调,即量化感知训练 (可选)**
如果模型的性能下降太多,可以尝试将其微调几轮以恢复原浮点模型的性能。
```python
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data).dequantize()
loss = torch.nn.functional.nll_loss(output, target)
loss.backward()
optimizer.step()
```
**4. 冻结整型权重**
模型冻结后,其浮点权重将替换为量化后的整型权重。
```python
freeze(model)
```
请参阅 [该例](https://github.com/huggingface/quanto/tree/main/examples) 以深入了解量化工作流程。你还可以查看此 [notebook](https://colab.research.google.com/drive/1qB6yXt650WXBWqroyQIegB-yrWKkiwhl?usp=sharing),其提供了一个完整的用 `quanto` 量化 BLOOM 模型的例子。
## 效果
下面我们列出了一些初步结果,我们还在紧锣密鼓地更新以进一步提高量化模型的准确性和速度。但从这些初步结果中,我们仍能看出 `quanto` 的巨大潜力。
下面两幅图评估了 [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) 在不同的量化参数下的准确度。注意: 每组的第一根柱子均表示非量化模型。
<div class="row"><center>
<div class="column">
<img src="https://github.com/huggingface/quanto/blob/main/bench/generation/charts/mistralai-Mistral-7B-v0.1_Accuracy.png?raw=true" alt="mistralai/Mistral-7B-v0.1 在 Lambada 数据集上的预测准确度 ">
</div>
</center>
</div>
<div class="row"><center>
<div class="column">
<img src="https://github.com/huggingface/quanto/blob/main/bench/generation/charts/mistralai-Mistral-7B-v0.1_Perplexity.png?raw=true" alt="mistralai/Mistral-7B-v0.1 在 Lambada 数据集上的困惑度 ">
</div>
</center>
</div>
上述结果均未使用任何高级训后量化算法 (如 [hqq](https://mobiusml.github.io/hqq_blog/) 或 [AWQ](https://github.com/mit-han-lab/llm-awq))。
下图给出了在英伟达 A100 GPU 上测到的词元延迟。
<div class="row"><center>
<div class="column">
<img src="https://github.com/huggingface/quanto/blob/main/bench/generation/charts/mistralai-Mistral-7B-v0.1_Latency__ms_.png?raw=true" alt="mistralai/Mistral-7B-v0.1 平均词元延迟 ">
</div>
</center>
</div>
这些测试结果都尚未利用任何优化的矩阵乘法算子。可以看到,量化位宽越低,开销越大。我们正在持续改进 [quanto](https://github.com/huggingface/quanto),以增加更多的优化器和优化算子,请持续关注我们的性能演进。
请参阅 [quanto 基准测试](https://github.com/huggingface/quanto/tree/main/bench/) 以了解在不同模型架构及配置下的详细结果。
## 集成进 `transformers`
我们已将 `quanto` 无缝集成至 Hugging Face [transformers](https://github.com/huggingface/transformers) 库中。你可以通过给 `from_pretrained` API 传 `QuantoConfig` 参数来对任何模型进行量化!
目前,你需要使用最新版本的 [accelerate](https://github.com/huggingface/accelerate) 以确保完全兼容。
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, QuantoConfig
model_id = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
quantization_config = QuantoConfig(weights="int8")
quantized_model = AutoModelForCausalLM.from_pretrained(
model_id,
quantization_config= quantization_config
)
```
你只需在 `QuantoConfig` 中设置相应的参数即可将模型的权重/激活量化成 `int8` 、 `float8` 、 `int4` 或 `int2` ; 还可将激活量化成 `int8` 或 `float8` 。如若设成 `float8` ,你需要有一个支持 `float8` 精度的硬件,否则当执行 matmul (仅当量化权重时) 时,我们会默认将权重和激活都转成 `torch.float32` 或 `torch.float16` (具体视模型的原始精度而定) 再计算。目前 `MPS` 设备不支持 `float8` , `torch` 会直接抛出错误。
`quanto` 与设备无关,这意味着无论用的是 CPU/GPU 还是 MPS (Apple 的芯片),你都可以对模型进行量化并运行它。
`quanto` 也可与 `torch.compile` 结合使用。你可以先用 `quanto` 量化模型,然后用 `torch.compile` 来编译它以加快其推理速度。如果涉及动态量化 (即使用量化感知训练或对激活进行动态量化),该功能可能无法开箱即用。因此,请确保在使用 `transformers` API 创建 `QuantoConfig` 时,设置 `activations=None` 。
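下面是把两者结合的一个简单示意 (这里用一个小的示例模型代替真实模型,且只量化权重):
```python
import torch
import quanto

model = torch.nn.Sequential(
    torch.nn.Linear(16, 16), torch.nn.ReLU(), torch.nn.Linear(16, 4)
)  # 示例模型
quanto.quantize(model, weights=quanto.qint8)  # 只量化权重,不量化激活
quanto.freeze(model)  # 冻结为整型权重
model = torch.compile(model)  # 再交给 torch.compile 编译以加速推理
```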
`quanto` 可用于量化任何模态的模型!下面展示了如何使用 `quanto` 将 `openai/whisper-large-v3` 模型量化至 `int8` 。
```python
from transformers import AutoModelForSpeechSeq2Seq
model_id = "openai/whisper-large-v3"
quanto_config = QuantoConfig(weights="int8")
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_id,
torch_dtype=torch.float16,
device_map="cuda",
quantization_config=quanto_config
)
```
你可查阅此 [notebook](https://colab.research.google.com/drive/16CXfVmtdQvciSh9BopZUDYcmXCDpvgrT?usp=sharing#scrollTo=IHbdLXAg53JL),以详细了解如何在 `transformers` 中正确使用 `quanto` !
## 实现细节
### 量化张量
`quanto` 的核心是一些 Tensor 子类,其主要做下面两件事:
- 将源张量按最优`比例` 投影至给定量化数据类型的取值范围内。
- 将投影后的值映射至目标数据类型。
当目标类型是浮点型时,映射由 PyTorch 原生转换接口 (即 `Tensor.to()` ) 完成。而当目标类型是整型时,映射可以用一个简单的舍入操作 (即 `torch.round()` ) 来完成。
投影的目标是提高数据类型转换的精确度,具体可以通过最小化以下两个值来达成:
- 饱和值的个数 (即有多少个数最终映射为目标数据类型的最小值/最大值),
- 归零值的个数 (即有多少个数因为小于目标数据类型可以表示的最小数字,所以被映射成了 0)。
为了提高效率起见, `8 比特` 量化时,我们使用对称投影,即以零点为中心进行投影。一般而言,对称量化张量与许多标准算子兼容。
在使用较低位宽的量化 (如 `int2` 或 `int4` ) 时,一般使用的是仿射投影。此时,会多一个 `zeropoint` 参数以对齐投影值和原值的零点。这种方法对量化范围的覆盖度会好些。仿射量化张量通常更难与标准算子兼容,因此一般需要自定义很多算子。
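下面用纯 PyTorch 给出上文对称 `8 比特` 投影的一个极简示意 (仅演示原理,并非 quanto 的实际实现):
```python
import torch

def quantize_symmetric_int8(x: torch.Tensor):
    """对称 int8 投影的示意:按最大绝对值求比例,再舍入并裁剪。"""
    scale = x.abs().max() / 127.0
    q = torch.clamp(torch.round(x / scale), -127, 127).to(torch.int8)
    return q, scale

x = torch.randn(4, 4)
q, scale = quantize_symmetric_int8(x)
x_dequant = q.to(torch.float32) * scale  # 反量化,近似还原原始张量
print((x - x_dequant).abs().max())  # 量化误差
```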
### 量化 `torch.nn` 模块
`quanto` 实现了一种通用机制,以用能够处理 `quanto` 张量的 `quanto` 模块替换相应的 `torch` 模块 ( `torch.nn.Module` )。
`quanto` 模块会动态对 `weights` 进行数据类型转换,直至模型被冻结,这在一定程度上会减慢推理速度,但如果需要微调模型 (即量化感知训练),则这么做是需要的。
此外,我们并未量化 `bias` 参数,因为它们比 `weights` 小得多,并且对加法进行量化很难获得太多加速。
我们动态地将激活量化至固定取值范围 (默认范围为 `[-1, 1]` ),并通过校准过程决定最佳的比例 (使用二阶动量更新法)。
我们支持以下模块的量化版:
- [Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) (QLinear)。仅量化权重,不量化偏置。输入和输出可量化。
- [Conv2d](https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html) (QConv2D)。仅量化权重,不量化偏置。输入和输出可量化。
- [LayerNorm](https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html)。权重和偏置均 **不** 量化。输出可量化。
### 定制算子
得益于 PyTorch 出色的调度机制,[quanto](https://github.com/huggingface/quanto) 支持在 [transformers](https://github.com/huggingface/transformers) 或 [diffusers](https://github.com/huggingface/diffusers) 的模型中最常用的函数,无需过多修改模型代码即可启用量化张量。
大多数“调度”功能可通过标准的 PyTorch API 的组合来完成。但一些复杂的函数仍需要使用 `torch.ops.quanto` 命名空间下的自定义操作。其中一个例子是低位宽的融合矩阵乘法。
### 训后量化优化
[quanto](https://github.com/huggingface/quanto) 中尚未支持高级的训后量化算法,但该库足够通用,因此与大多数 PTQ 优化算法兼容,如 [hqq](https://mobiusml.github.io/hqq_blog/)、[AWQ](https://github.com/mit-han-lab/llm-awq) 等。
展望未来,我们计划无缝集成这些最流行的算法。
## 为 Quanto 作出贡献
我们非常欢迎大家对 [quanto](https://github.com/huggingface/quanto) 作出贡献,尤其欢迎以下几类贡献:
- 实现更多针对特定设备的 [quanto](https://github.com/huggingface/quanto) 优化算子,
- 支持更多的 PTQ 优化算法,
- 扩大量化张量可调度操作的覆盖面。 | 8 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/open-source-llms-as-agents.md | ---
title: "开源大语言模型作为 LangChain 智能体"
thumbnail: /blog/assets/open-source-llms-as-agents/thumbnail_open_source_agents.png
authors:
- user: m-ric
- user: Jofthomas
- user: andrewrreed
translators:
- user: Evinci
- user: zhongdongy
proofreader: true
---
# 开源大语言模型作为 LangChain 智能体
## 概要
开源大型语言模型 (LLMs) 现已达到相当的性能水平,足以作为驱动智能体工作流的推理引擎: [Mixtral](https://huggingface.co/blog/mixtral) 在我们的基准测试中甚至 [超过了 GPT-3.5](#结果),并且通过微调,其性能还可以轻易地得到进一步增强。
## 引言
针对 [因果语言建模](https://huggingface.co/docs/transformers/tasks/language_modeling) 训练的大型语言模型 (LLMs) 可以处理广泛的任务,但它们经常在逻辑、计算和搜索等基本任务上遇到困难。最糟糕的情况是,它们在某个领域,比如数学,表现不佳,却仍然试图自己处理所有计算。
为了克服这一弱点,除其他方法外,可以将 LLM 整合到一个系统中,在该系统中,它可以调用工具: 这样的系统称为 LLM 智能体。
在这篇文章中,我们将解释 ReAct 智能体的内部工作原理,然后展示如何使用最近在 LangChain 中集成的 `ChatHuggingFace` 类来构建它们。最后,我们将几个开源 LLM 与 GPT-3.5 和 GPT-4 进行基准测试。
## 目录
- [什么是智能体?](#什么是智能体?)
- [ReAct智能体内部工作原理示例](#ReAct智能体内部工作原理示例)
- [智能体系统的挑战](#智能体系统的挑战)
- [使用LangChain运行智能体](#使用LangChain运行智能体)
- [智能体对决:开源LLM作为通用推理智能体的表现如何?](#智能体对决:开源LLM作为通用推理智能体的表现如何?)
- [评估](#评估)
- [结果](#结果)
## 什么是智能体?
LLM 智能体的定义非常宽泛: 它们指的是所有将 LLMs 作为核心引擎,并能够根据观察对其环境施加影响的系统。这些系统能够通过多次迭代“感知 ⇒ 思考 ⇒ 行动”的循环来实现既定任务,并常常融入规划或知识管理系统以提升其表现效能。你可以在 [Xi et al., 2023](https://huggingface.co/papers/2309.07864) 的研究中找到对智能体领域综述的精彩评述。
今天,我们将重点放在 **ReAct 智能体** 上。[ReAct](https://huggingface.co/papers/2210.03629) 采用一种基于“**推理** (Reasoning)”与“**行动** (Acting)”结合的方式来构建智能体。在提示词中,我们阐述了模型能够利用哪些工具,并引导它“逐步”思考 (亦称为 [思维链](https://huggingface.co/papers/2201.11903) 行为),以规划并实施其后续动作,达成最终的目标。
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png" alt="drawing" width=90%>
</p>
### ReAct 智能体内部工作原理示例
上述图解虽显得有些抽象,但其核心原理其实相当直接。
参见 [此笔记本](https://colab.research.google.com/drive/1j_vsc28FwZEDocDxVxWJ6Fvxd18FK8Gl?usp=sharing): 我们借助 Transformers 库展示了一个最基础的工具调用实例。
本质上,LLM 通过一个循环被调用,循环中的提示包含如下内容:
```
这里是一个问题: “{question}”
你可以使用这些工具: {tools_descriptions}。
首先,你需要进行‘思考: {your_thoughts}’,接下来你可以:
- 以正确的 JSON 格式发起工具调用,
- 或者,以‘最终答案:’为前缀来输出你的答案。
```
接下来,你需要解析 LLM 的输出:
- 如果输出中包含`‘最终答案:’` 字符串,循环便结束,并输出该答案;
- 若不包含,则表示 LLM 进行了工具调用: 你需解析此输出以获得工具的名称及其参数,随后根据这些参数执行相应工具的调用。此工具调用的结果将被追加至提示信息中,然后你将带有这些新增信息的提示再次传递给 LLM,直至它获得足够的信息来给出问题的最终答案。
例如,LLM 的输出在回答问题: `1:23:45 中有多少秒?` 时可能看起来像这样:
```
思考: 我需要将时间字符串转换成秒。
动作:
{
"action": "convert_time",
"action_input": {
"time": "1:23:45"
}
}
```
鉴于此输出未包含 `‘最终答案:’` 字符串,它代表进行了工具调用。因此我们解析该输出,获取工具调用的参数: 以参数 `{"time": "1:23:45"}` 调用 `convert_time` 工具,执行该工具调用后返回 `{'seconds': '5025'}` 。
于是,我们将这整个信息块追加至提示词中。
更新后的提示词现在变为 (更为详尽的版本):
```
这是一个问题: “1:23:45 包含多少秒?”
你可以使用以下工具:
- convert_time: 将小时、分钟、秒格式的时间转换为秒。
首先,进行“思考: {your_thoughts}”,之后你可以:
- 使用正确的 JSON 格式调用工具,
- 或以“最终答案:”为前缀输出你的答案。
思考: 我需要把时间字符串转换成秒数。
行动:
{
"action": "convert_time",
"action_input": {
"time": "1:23:45"
}
}
观测结果: {'seconds': '5025'}
```
➡️ 我们用这个新的提示再次调用 LLM,鉴于它可以访问工具调用结果中的 `观测结果` ,LLM 现在最有可能输出:
```
思考: 我现在有了回答问题所需的信息。
最终答案: 1:23:45 中有 5025 秒。
```
任务就这样完成了!
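把上述循环写成代码,大致是下面这个极简示意 (其中 `llm` 是任意“输入提示、返回文本”的可调用对象,`tools` 是工具名称到函数的映射,均为假设;LangChain 中对应的实现见后文):
```python
import json
from typing import Callable, Dict

def run_react_loop(
    llm: Callable[[str], str], tools: Dict[str, Callable], prompt: str, max_steps: int = 5
) -> str:
    """极简的 ReAct 循环示意:反复调用 LLM、解析工具调用,直到出现最终答案。"""
    for _ in range(max_steps):
        output = llm(prompt)
        if "最终答案" in output:
            return output.split("最终答案", 1)[-1].lstrip(":: ").strip()
        # 否则认为输出中含有一个 JSON 格式的工具调用
        action = json.loads(output[output.index("{") : output.rindex("}") + 1])
        observation = tools[action["action"]](**action["action_input"])
        prompt += f"\n{output}\n观测结果: {observation}\n"
    return "超过最大步数,仍未得到最终答案"
```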
### 智能体系统的挑战
一般来说,运行 LLM 引擎的智能体系统的难点包括:
1. 从提供的工具中选择一个能够帮助实现目标的工具: 例如,当询问`“大于 30,000 的最小质数是什么?”` 时,智能体可能会调用`“K2 的高度是多少?”` 的`Search` 工具,但这并无帮助。
2. 以严格的参数格式调用工具: 例如,在尝试计算一辆汽车 10 分钟内行驶 3 公里的速度时,你必须调用`Calculator` 工具,通过`distance` 除以`time` 来计算: 即便你的 Calculator 工具接受 JSON 格式的调用`{“tool”: “Calculator”, “args”: “3km/10min”}` ,也存在许多陷阱,例如:
- 工具名称拼写错误: `“calculator”` 或`“Compute”` 是无效的
- 提供的是参数名称而非其值: `“args”: “distance/time”`
- 格式非标准化: `“args": "3km in 10minutes”`
3. 高效地吸收和利用过去观察到的信息,无论是初始上下文还是使用工具后返回的观察结果。
那么,完整的智能体设置会是怎样的呢?
## 使用 LangChain 运行智能体
我们刚刚在 [🦜🔗LangChain](https://www.langchain.com/) 中集成了一个 `ChatHuggingFace` 封装器,使你能够基于开源模型创建智能体。
创建 ChatModel 并为其配备工具的代码非常简单,你可以在 [Langchain 文档](https://python.langchain.com/docs/integrations/chat/huggingface) 中查看所有相关代码。
```python
from langchain_community.llms import HuggingFaceHub
from langchain_community.chat_models.huggingface import ChatHuggingFace
llm = HuggingFaceHub(
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
)
chat_model = ChatHuggingFace(llm=llm)
```
你可以通过为其提供 ReAct 风格的提示词和工具,将 `chat_model` 转化为一个智能体:
```python
from langchain import hub
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import (
ReActJsonSingleInputOutputParser,
)
from langchain.tools.render import render_text_description
from langchain_community.utilities import SerpAPIWrapper
# 设置工具
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# 设置 ReAct 风格的提示词
prompt = hub.pull("hwchase17/react-json")
prompt = prompt.partial(
tools=render_text_description(tools),
tool_names=", ".join([t.name for t in tools]),
)
# 定义智能体
chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
}
| prompt
| chat_model_with_stop
| ReActJsonSingleInputOutputParser()
)
# 实例化 AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke(
{
"input": "Who is the current holder of the speed skating world record on 500 meters? What is her current age raised to the 0.43 power?"
}
)
```
智能体将处理如下输入:
```markdown
思考: 为了回答这个问题,我需要找出当前速滑世界纪录保持者的年龄。我将使用搜索工具来获取这些信息。
行动:
{
"action": "search",
"action_input": " 速滑 500 米世界纪录保持者的年龄 "
}
观察: ...
```
## 智能体对决: 开源 LLM 作为通用推理智能体的表现如何?
你可以在 [这里](https://github.com/aymeric-roucher/benchmark_agents/) 找到这个基准测试的代码。
### 评估
我们旨在评估开源大型语言模型 (LLMs) 作为通用推理智能体的表现。因此,我们选择了需要运用逻辑和使用基本工具 (如计算器和互联网搜索访问) 的问题。
[最终数据集](https://huggingface.co/datasets/m-ric/agents_small_benchmark) 是从其他三个数据集中选取样本的组合:
- 为了测试互联网搜索能力,我们选取了 [HotpotQA](https://huggingface.co/datasets/hotpot_qa) 中的问题: 虽然这是一个检索型数据集,但它也可用于需要互联网访问的通用问题解答。有些问题原本需要结合来自不同来源的信息: 在我们的设置中,这意味着需要进行多步互联网搜索以整合结果。
- 对于计算器的应用,我们加入了来自 [GSM8K](https://huggingface.co/datasets/gsm8k) 的问题: 这个数据集测试小学数学能力,并且完全可以通过正确利用 4 个运算符 (加、减、乘、除) 来解决。
- 我们还从 [GAIA](https://huggingface.co/papers/2311.12983) 中挑选了问题,这是一个对通用 AI 助理来说极具挑战性的基准测试。原始数据集中的问题可能需要许多其他不同的工具,如代码解释器或 PDF 阅读器: 我们精选了那些仅需搜索和计算器的问题。
评估采用了 GPT-4 作为评判员,基于 [Prometheus 提示格式](https://huggingface.co/kaist-ai/prometheus-13b-v1.0) 的提示进行,结果以 5 点李克特量表 (5-point Likert Scale) 表示: 具体的提示内容可在 [此处](https://github.com/aymeric-roucher/benchmark_agents/blob/master/scripts/prompts.py) 查看。
### 模型
我们评估了一些强大的开源模型:
- [Llama2-70b-chat](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)
- [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
- [OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B)
- [Zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta)
- [SOLAR-10.7B-Instruct-v1.0](https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0)
这些模型通过 LangChain 的 [ReAct 实现](https://github.com/langchain-ai/langchain/tree/021b0484a8d9e8cf0c84bc164fb904202b9e4736/libs/langchain/langchain/agents/react) 进行了评估
这意味着我们引导它们按照以下格式输出函数调用:
```json
{
"action": $TOOL_NAME,
"action_input": $INPUT
}
```
作为比较,我们还利用 LangChain 的 [专为 OpenAI 设计的智能体](https://github.com/langchain-ai/langchain/tree/021b0484a8d9e8cf0c84bc164fb904202b9e4736/libs/langchain/langchain/agents/openai_functions_agent) 对 GPT-3.5 和 GPT-4 在相同的例子上进行了评估。
由于这涉及到它们自己的函数调用模板,并在此基础上进行了微调,这意味着 OpenAI 模型应当展现出最佳性能。
### 结果
鉴于开源模型并没有针对特定的输出格式进行特别的函数调用微调,相较于 OpenAI 智能体,它们处于稍微不利的位置。
尽管存在这样的不利条件,某些模型的表现依然非常优秀!💪
这是 Mixtral-8x7B 回答“贵阳和塔城哪个城市人口更多?”问题的示例:
```markdown
思考: 为了回答这个问题,我需要找到贵阳和塔城当前的人口数。我将利用搜索工具来查询这些信息。
操作:
{
"action": "search",
"action_input": " 贵阳当前人口 "
}
4.697 百万 (2016 年)
思考: 我已获得贵阳的人口数,为 4.697 百万 (截至 2016 年)。现在,我将再次使用搜索工具来确定塔城的人口数。
操作:
{
"action": "search",
"action_input": " 塔城当前人口 "
}
人口: 新疆: 塔城的数据在 2021 年报告为 1,110,000 人。这比 2020 年的 1,138,638 人有所下降。
人口: 新疆: 塔城的数据每年更新,从 2005 年 12 月到 2021 年,平均人口为 1,023,000 人,共 17 次观察。
我已确定塔城的人口约为 1.11 百万 (截至 2021 年)。对比两地人口,贵阳的人口超过塔城。
思考: 我现在清楚最终答案了
最终答案: 贵阳的人口更多,约为 4.697 百万 (截至 2016 年),而塔城的人口约为 1.11 百万 (截至 2021 年)。
```
这是各模型在我们的评估数据集上的基准测试结果 (原始平均分按 1-5 的评分量表,已转换为 0-100%的量表以便阅读):
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/benchmark_agents.png" alt="benchmark of agents performance" width="90%">
</p>
正如所见,一些开源模型在推动智能体工作流程方面表现欠佳: 虽然对于小型的 Zephyr-7b 而言这是预期之中的,但令人惊讶的是 Llama2-70b 的表现相当差。
👉 令人惊讶的是,**Mixtral-8x7B 的表现非常出色: 它甚至超越了 GPT-3.5!**🏆
这是即开即用的性能: _**与 GPT-3.5 不同的是,据我们所知,Mixtral 没有针对智能体工作流程进行过微调**_ ,这在一定程度上影响了其性能。例如,在 GAIA 上,因为 Mixtral 尝试使用格式不正确的参数调用工具,导致 10%的问题失败。 **如果对功能调用和任务规划技能进行适当的微调,Mixtral 的得分可能会更高。**
➡️ 我们强烈建议开源开发者开始针对智能体对 Mixtral 进行微调,以超越下一个挑战者: GPT-4!🚀
**结语:**
- 虽然 GAIA 基准测试仅在一小部分问题和少数工具上进行了尝试,但它似乎是智能体工作流程整体模型性能的一个非常强大的指标,因为它通常涉及多个推理步骤和严格的逻辑。
- 智能体工作流程使 LLMs 能够提升性能: 例如,在 GSM8K 上,[GPT-4 的技术报告](https://arxiv.org/pdf/2303.08774.pdf) 显示,使用 5 次 CoT 提示的情况下得分为 92%: 通过提供一个计算器,我们能够在零次提示的情况下达到 95%。对于 Mixtral-8x7B,[LLM 排行榜](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) 报告了使用 5 次提示的情况下为 57.6%,而我们在零次提示的情况下达到了 73%。 _(记住,我们仅测试了 GSM8K 的 20 个问题)_
| 9 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain/app | hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/text_to_speech.py | from typing import Tuple
import numpy as np
from app.common import ModelType, get_type, get_vocoder_model_id
from app.pipelines import Pipeline
from speechbrain.inference import HIFIGAN, FastSpeech2, Tacotron2
class TextToSpeechPipeline(Pipeline):
def __init__(self, model_id: str):
model_type = get_type(model_id)
if model_type is ModelType.TACOTRON2:
self.model = Tacotron2.from_hparams(source=model_id)
self.type = "tacotron2"
elif model_type is ModelType.FASTSPEECH2:
self.model = FastSpeech2.from_hparams(source=model_id)
self.type = "fastspeech2"
else:
raise ValueError(f"{model_type.value} is invalid for text-to-speech")
vocoder_type = get_type(model_id, "vocoder_interface")
vocoder_model_id = get_vocoder_model_id(model_id)
if vocoder_type is ModelType.HIFIGAN:
self.vocoder_model = HIFIGAN.from_hparams(source=vocoder_model_id)
else:
raise ValueError(
f"{vocoder_type.value} is invalid vocoder for text-to-speech"
)
self.sampling_rate = self.model.hparams.sample_rate
def __call__(self, inputs: str) -> Tuple[np.array, int]:
"""
Args:
inputs (:obj:`str`):
The text to generate audio from
Return:
A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array, and the sampling rate as an int.
"""
if not inputs.replace("\0", "").strip():
inputs = "Empty query"
if self.type == "tacotron2":
mel_output, _, _ = self.model.encode_text(inputs)
elif self.type == "fastspeech2":
mel_output, _, _, _ = self.model.encode_text(
[inputs], pace=1.0, pitch_rate=1.0, energy_rate=1.0
)
waveforms = self.vocoder_model.decode_batch(mel_output).numpy()
return waveforms, self.sampling_rate
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain/app | hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/text2text_generation.py | from typing import Dict, List
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import GraphemeToPhoneme
POSTPROCESSING = {ModelType.GRAPHEMETOPHONEME: lambda output: "-".join(output)}
class TextToTextPipeline(Pipeline):
def __init__(self, model_id: str):
model_type = get_type(model_id)
if model_type == ModelType.GRAPHEMETOPHONEME:
self.model = GraphemeToPhoneme.from_hparams(source=model_id)
else:
raise ValueError(f"{model_type.value} is invalid for text-to-text")
self.post_process = POSTPROCESSING.get(model_type, lambda output: output)
def __call__(self, inputs: str) -> List[Dict[str, str]]:
"""
Args:
inputs (:obj:`str`):
The input text
Return:
A :obj:`list`:. The list contains a single item that is a dict {"text": the model output}
"""
output = self.model(inputs)
output = self.post_process(output)
return [{"generated_text": output}]
| 1 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain/app | hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/audio_classification.py | from typing import Dict, List
import numpy as np
import torch
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import EncoderClassifier
class AudioClassificationPipeline(Pipeline):
def __init__(self, model_id: str):
model_type = get_type(model_id)
if model_type != ModelType.ENCODERCLASSIFIER:
raise ValueError(f"{model_type.value} is invalid for audio-classification")
self.model = EncoderClassifier.from_hparams(source=model_id)
self.top_k = 5
# Please define a `self.sampling_rate` for this pipeline
# to automatically read the input correctly
self.sampling_rate = 16000
def __call__(self, inputs: np.array) -> List[Dict[str, float]]:
"""
Args:
inputs (:obj:`np.array`):
The raw waveform of audio received. By default at 16KHz.
Return:
A :obj:`list`:. The object returned should be a list like [{"label": "text", "score": 0.9939950108528137}] containing :
- "label": A string representing what the label/class is. There can be multiple labels.
- "score": A score between 0 and 1 describing how confident the model is for this label/class.
"""
batch = torch.from_numpy(inputs).unsqueeze(0)
rel_length = torch.tensor([1.0])
probs, _, _, _ = self.model.classify_batch(batch, rel_length)
probs = torch.softmax(probs[0], dim=0)
labels = self.model.hparams.label_encoder.decode_ndim(range(len(probs)))
results = []
for prob, label in sorted(zip(probs, labels), reverse=True)[: self.top_k]:
results.append({"label": label, "score": prob.item()})
return results
| 2 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain/app | hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
@abstractmethod
def __init__(self, model_id: str):
raise NotImplementedError("Pipelines should implement an __init__ method")
@abstractmethod
def __call__(self, inputs: Any) -> Any:
raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
pass
| 3 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain/app | hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.audio_classification import AudioClassificationPipeline
from app.pipelines.audio_to_audio import AudioToAudioPipeline
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
from app.pipelines.text2text_generation import TextToTextPipeline
from app.pipelines.text_to_speech import TextToSpeechPipeline
| 4 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain/app | hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/audio_to_audio.py | from typing import List, Tuple
import numpy as np
import torch
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import (
SepformerSeparation,
SpectralMaskEnhancement,
WaveformEnhancement,
)
class AudioToAudioPipeline(Pipeline):
def __init__(self, model_id: str):
model_type = get_type(model_id)
if model_type == ModelType.SEPFORMERSEPARATION:
self.model = SepformerSeparation.from_hparams(source=model_id)
self.type = "audio-source-separation"
elif model_type == ModelType.SPECTRALMASKENHANCEMENT:
self.model = SpectralMaskEnhancement.from_hparams(source=model_id)
self.type = "speech-enhancement"
elif model_type == ModelType.WAVEFORMENHANCEMENT:
self.type = "speech-enhancement"
self.model = WaveformEnhancement.from_hparams(source=model_id)
else:
raise ValueError(f"{model_type.value} is invalid for audio-to-audio")
self.sampling_rate = self.model.hparams.sample_rate
def __call__(self, inputs: np.array) -> Tuple[np.array, int, List[str]]:
"""
Args:
inputs (:obj:`np.array`):
The raw waveform of audio received. By default sampled at `self.sampling_rate`.
The shape of this array is `T`, where `T` is the time axis
Return:
A :obj:`tuple` containing:
- :obj:`np.array`:
The return shape of the array must be `C'`x`T'`
- a :obj:`int`: the sampling rate as an int in Hz.
- a :obj:`List[str]`: the annotation for each out channel.
This can be the name of the instruments for audio source separation
or some annotation for speech enhancement. The length must be `C'`.
"""
if self.type == "speech-enhancement":
return self.enhance(inputs)
elif self.type == "audio-source-separation":
return self.separate(inputs)
else:
return self.separate(inputs)
def separate(self, inputs):
mix = torch.from_numpy(inputs)
est_sources = self.model.separate_batch(mix.unsqueeze(0))
est_sources = est_sources[0]
# C x T
est_sources = est_sources.transpose(1, 0)
# normalize for loudness
est_sources = est_sources / est_sources.abs().max(dim=1, keepdim=True).values
n = est_sources.shape[0]
labels = [f"label_{i}" for i in range(n)]
return est_sources.numpy(), int(self.sampling_rate), labels
def enhance(self, inputs: np.array):
mix = torch.from_numpy(inputs)
enhanced = self.model.enhance_batch(mix.unsqueeze(0))
# C x T
labels = ["speech_enhanced"]
return enhanced.numpy(), int(self.sampling_rate), labels
| 5 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain/app | hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/automatic_speech_recognition.py | from typing import Dict
import numpy as np
import torch
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import EncoderASR, EncoderDecoderASR, WhisperASR
class AutomaticSpeechRecognitionPipeline(Pipeline):
def __init__(self, model_id: str):
model_type = get_type(model_id)
if model_type is ModelType.ENCODERASR:
self.model = EncoderASR.from_hparams(source=model_id)
elif model_type is ModelType.ENCODERDECODERASR:
self.model = EncoderDecoderASR.from_hparams(source=model_id)
# Reduce latency
self.model.mods.decoder.beam_size = 1
elif model_type is ModelType.WHISPERASR:
self.model = WhisperASR.from_hparams(source=model_id)
else:
raise ValueError(
f"{model_type.value} is invalid for automatic-speech-recognition"
)
# Please define a `self.sampling_rate` for this pipeline
# to automatically read the input correctly
self.sampling_rate = self.model.hparams.sample_rate
def __call__(self, inputs: np.array) -> Dict[str, str]:
"""
Args:
inputs (:obj:`np.array`):
The raw waveform of audio received. By default at 16KHz.
Check `app.validation` if a different sample rate is required
or if it depends on the model
Return:
A :obj:`dict`:. The object return should be liked {"text": "XXX"} containing
the detected language from the input audio
"""
batch = torch.from_numpy(inputs).unsqueeze(0)
rel_length = torch.tensor([1.0])
predicted_words, predicted_tokens = self.model.transcribe_batch(
batch, rel_length
)
return {"text": predicted_words[0]}
| 6 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
def test_can_build_docker_image(self):
with cd(os.path.dirname(os.path.dirname(__file__))):
subprocess.check_output(["docker", "build", "."])
| 7 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_audio_to_audio.py | import base64
import json
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"audio-to-audio" not in ALLOWED_TASKS,
"audio-to-audio not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["audio-to-audio"]]
)
class AudioToAudioTestCase(TestCase):
def setUp(self):
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = self.model_id
os.environ["TASK"] = "audio-to-audio"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_simple(self):
bpayload = self.read("sample1.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "application/json")
audio = json.loads(response.content)
self.assertTrue(isinstance(audio, list))
self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"})
data = base64.b64decode(audio[0]["blob"])
wavform = ffmpeg_read(data, 16000)
self.assertGreater(wavform.shape[0], 1000)
self.assertTrue(isinstance(audio[0]["content-type"], str))
self.assertTrue(isinstance(audio[0]["label"], str))
def test_malformed_audio(self):
bpayload = self.read("malformed.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
def test_dual_channel_audiofile(self):
bpayload = self.read("sample1_dual.ogg")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "application/json")
audio = json.loads(response.content)
self.assertTrue(isinstance(audio, list))
self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"})
data = base64.b64decode(audio[0]["blob"])
wavform = ffmpeg_read(data, 16000)
self.assertGreater(wavform.shape[0], 1000)
self.assertTrue(isinstance(audio[0]["content-type"], str))
self.assertTrue(isinstance(audio[0]["label"], str))
def test_webm_audiofile(self):
bpayload = self.read("sample1.webm")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "application/json")
audio = json.loads(response.content)
self.assertTrue(isinstance(audio, list))
self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"})
data = base64.b64decode(audio[0]["blob"])
wavform = ffmpeg_read(data, 16000)
self.assertGreater(wavform.shape[0], 1000)
self.assertTrue(isinstance(audio[0]["content-type"], str))
self.assertTrue(isinstance(audio[0]["label"], str))
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_automatic_speech_recognition.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"automatic-speech-recognition" not in ALLOWED_TASKS,
"automatic-speech-recognition not implemented",
)
@parameterized_class(
[
{"model_id": model_id}
for model_id in TESTABLE_MODELS["automatic-speech-recognition"]
]
)
class AutomaticSpeecRecognitionTestCase(TestCase):
def setUp(self):
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = self.model_id
os.environ["TASK"] = "automatic-speech-recognition"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_simple(self):
bpayload = self.read("sample1.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(set(content.keys()), {"text"})
def test_malformed_audio(self):
bpayload = self.read("malformed.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
def test_dual_channel_audiofile(self):
bpayload = self.read("sample1_dual.ogg")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(set(content.keys()), {"text"})
def test_webm_audiofile(self):
bpayload = self.read("sample1.webm")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(set(content.keys()), {"text"})
| 9 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo | hf_public_repos/candle/candle-wasm-examples/yolo/src/lib.rs | mod app;
pub mod coco_classes;
pub mod model;
pub mod worker;
pub use app::App;
pub use worker::Worker;
| 0 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo | hf_public_repos/candle/candle-wasm-examples/yolo/src/coco_classes.rs | pub const NAMES: [&str; 80] = [
"person",
"bicycle",
"car",
"motorbike",
"aeroplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"sofa",
"pottedplant",
"bed",
"diningtable",
"toilet",
"tvmonitor",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
];
| 1 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo | hf_public_repos/candle/candle-wasm-examples/yolo/src/model.rs | use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, BatchNorm, Conv2d, Conv2dConfig, Module, VarBuilder,
};
use image::DynamicImage;
// Model architecture from https://github.com/ultralytics/ultralytics/issues/189
// https://github.com/tinygrad/tinygrad/blob/master/examples/yolov8.py
#[derive(Clone, Copy, PartialEq, Debug)]
pub struct Multiples {
depth: f64,
width: f64,
ratio: f64,
}
impl Multiples {
pub fn n() -> Self {
Self {
depth: 0.33,
width: 0.25,
ratio: 2.0,
}
}
pub fn s() -> Self {
Self {
depth: 0.33,
width: 0.50,
ratio: 2.0,
}
}
pub fn m() -> Self {
Self {
depth: 0.67,
width: 0.75,
ratio: 1.5,
}
}
pub fn l() -> Self {
Self {
depth: 1.00,
width: 1.00,
ratio: 1.0,
}
}
pub fn x() -> Self {
Self {
depth: 1.00,
width: 1.25,
ratio: 1.0,
}
}
fn filters(&self) -> (usize, usize, usize) {
let f1 = (256. * self.width) as usize;
let f2 = (512. * self.width) as usize;
let f3 = (512. * self.width * self.ratio) as usize;
(f1, f2, f3)
}
}
#[derive(Debug)]
struct Upsample {
scale_factor: usize,
}
impl Upsample {
fn new(scale_factor: usize) -> Result<Self> {
Ok(Upsample { scale_factor })
}
}
impl Module for Upsample {
fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> {
let (_b_size, _channels, h, w) = xs.dims4()?;
xs.upsample_nearest2d(self.scale_factor * h, self.scale_factor * w)
}
}
#[derive(Debug)]
struct ConvBlock {
conv: Conv2d,
bn: BatchNorm,
}
impl ConvBlock {
fn load(
vb: VarBuilder,
c1: usize,
c2: usize,
k: usize,
stride: usize,
padding: Option<usize>,
) -> Result<Self> {
let padding = padding.unwrap_or(k / 2);
let cfg = Conv2dConfig {
padding,
stride,
groups: 1,
dilation: 1,
};
let conv = conv2d_no_bias(c1, c2, k, cfg, vb.pp("conv"))?;
let bn = batch_norm(c2, 1e-3, vb.pp("bn"))?;
Ok(Self { conv, bn })
}
}
impl Module for ConvBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.conv.forward(xs)?.apply_t(&self.bn, false)?;
candle_nn::ops::silu(&xs)
}
}
#[derive(Debug)]
struct Bottleneck {
cv1: ConvBlock,
cv2: ConvBlock,
residual: bool,
}
impl Bottleneck {
fn load(vb: VarBuilder, c1: usize, c2: usize, shortcut: bool) -> Result<Self> {
let channel_factor = 1.;
let c_ = (c2 as f64 * channel_factor) as usize;
let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 3, 1, None)?;
let cv2 = ConvBlock::load(vb.pp("cv2"), c_, c2, 3, 1, None)?;
let residual = c1 == c2 && shortcut;
Ok(Self { cv1, cv2, residual })
}
}
impl Module for Bottleneck {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = self.cv2.forward(&self.cv1.forward(xs)?)?;
if self.residual {
xs + ys
} else {
Ok(ys)
}
}
}
#[derive(Debug)]
struct C2f {
cv1: ConvBlock,
cv2: ConvBlock,
bottleneck: Vec<Bottleneck>,
}
impl C2f {
fn load(vb: VarBuilder, c1: usize, c2: usize, n: usize, shortcut: bool) -> Result<Self> {
let c = (c2 as f64 * 0.5) as usize;
let cv1 = ConvBlock::load(vb.pp("cv1"), c1, 2 * c, 1, 1, None)?;
let cv2 = ConvBlock::load(vb.pp("cv2"), (2 + n) * c, c2, 1, 1, None)?;
let mut bottleneck = Vec::with_capacity(n);
for idx in 0..n {
let b = Bottleneck::load(vb.pp(format!("bottleneck.{idx}")), c, c, shortcut)?;
bottleneck.push(b)
}
Ok(Self {
cv1,
cv2,
bottleneck,
})
}
}
impl Module for C2f {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = self.cv1.forward(xs)?;
let mut ys = ys.chunk(2, 1)?;
for m in self.bottleneck.iter() {
ys.push(m.forward(ys.last().unwrap())?)
}
let zs = Tensor::cat(ys.as_slice(), 1)?;
self.cv2.forward(&zs)
}
}
#[derive(Debug)]
struct Sppf {
cv1: ConvBlock,
cv2: ConvBlock,
k: usize,
}
impl Sppf {
fn load(vb: VarBuilder, c1: usize, c2: usize, k: usize) -> Result<Self> {
let c_ = c1 / 2;
let cv1 = ConvBlock::load(vb.pp("cv1"), c1, c_, 1, 1, None)?;
let cv2 = ConvBlock::load(vb.pp("cv2"), c_ * 4, c2, 1, 1, None)?;
Ok(Self { cv1, cv2, k })
}
}
impl Module for Sppf {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_, _, _, _) = xs.dims4()?;
let xs = self.cv1.forward(xs)?;
let xs2 = xs
.pad_with_zeros(2, self.k / 2, self.k / 2)?
.pad_with_zeros(3, self.k / 2, self.k / 2)?
.max_pool2d_with_stride(self.k, 1)?;
let xs3 = xs2
.pad_with_zeros(2, self.k / 2, self.k / 2)?
.pad_with_zeros(3, self.k / 2, self.k / 2)?
.max_pool2d_with_stride(self.k, 1)?;
let xs4 = xs3
.pad_with_zeros(2, self.k / 2, self.k / 2)?
.pad_with_zeros(3, self.k / 2, self.k / 2)?
.max_pool2d_with_stride(self.k, 1)?;
self.cv2.forward(&Tensor::cat(&[&xs, &xs2, &xs3, &xs4], 1)?)
}
}
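// DFL (Distribution Focal Loss) head: each box side is predicted as a softmax
// distribution over `num_classes` bins (16 here); the 1x1 convolution reduces the
// distribution to its expected value (in the published YOLOv8 weights the kernel
// holds the bin indices 0..=15).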
#[derive(Debug)]
struct Dfl {
conv: Conv2d,
num_classes: usize,
}
impl Dfl {
fn load(vb: VarBuilder, num_classes: usize) -> Result<Self> {
let conv = conv2d_no_bias(num_classes, 1, 1, Default::default(), vb.pp("conv"))?;
Ok(Self { conv, num_classes })
}
}
impl Module for Dfl {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b_sz, _channels, anchors) = xs.dims3()?;
let xs = xs
.reshape((b_sz, 4, self.num_classes, anchors))?
.transpose(2, 1)?;
let xs = candle_nn::ops::softmax(&xs, 1)?;
self.conv.forward(&xs)?.reshape((b_sz, 4, anchors))
}
}
#[derive(Debug)]
struct DarkNet {
b1_0: ConvBlock,
b1_1: ConvBlock,
b2_0: C2f,
b2_1: ConvBlock,
b2_2: C2f,
b3_0: ConvBlock,
b3_1: C2f,
b4_0: ConvBlock,
b4_1: C2f,
b5: Sppf,
}
impl DarkNet {
fn load(vb: VarBuilder, m: Multiples) -> Result<Self> {
let (w, r, d) = (m.width, m.ratio, m.depth);
let b1_0 = ConvBlock::load(vb.pp("b1.0"), 3, (64. * w) as usize, 3, 2, Some(1))?;
let b1_1 = ConvBlock::load(
vb.pp("b1.1"),
(64. * w) as usize,
(128. * w) as usize,
3,
2,
Some(1),
)?;
let b2_0 = C2f::load(
vb.pp("b2.0"),
(128. * w) as usize,
(128. * w) as usize,
(3. * d).round() as usize,
true,
)?;
let b2_1 = ConvBlock::load(
vb.pp("b2.1"),
(128. * w) as usize,
(256. * w) as usize,
3,
2,
Some(1),
)?;
let b2_2 = C2f::load(
vb.pp("b2.2"),
(256. * w) as usize,
(256. * w) as usize,
(6. * d).round() as usize,
true,
)?;
let b3_0 = ConvBlock::load(
vb.pp("b3.0"),
(256. * w) as usize,
(512. * w) as usize,
3,
2,
Some(1),
)?;
let b3_1 = C2f::load(
vb.pp("b3.1"),
(512. * w) as usize,
(512. * w) as usize,
(6. * d).round() as usize,
true,
)?;
let b4_0 = ConvBlock::load(
vb.pp("b4.0"),
(512. * w) as usize,
(512. * w * r) as usize,
3,
2,
Some(1),
)?;
let b4_1 = C2f::load(
vb.pp("b4.1"),
(512. * w * r) as usize,
(512. * w * r) as usize,
(3. * d).round() as usize,
true,
)?;
let b5 = Sppf::load(
vb.pp("b5.0"),
(512. * w * r) as usize,
(512. * w * r) as usize,
5,
)?;
Ok(Self {
b1_0,
b1_1,
b2_0,
b2_1,
b2_2,
b3_0,
b3_1,
b4_0,
b4_1,
b5,
})
}
fn forward(&self, xs: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let x1 = self.b1_1.forward(&self.b1_0.forward(xs)?)?;
let x2 = self
.b2_2
.forward(&self.b2_1.forward(&self.b2_0.forward(&x1)?)?)?;
let x3 = self.b3_1.forward(&self.b3_0.forward(&x2)?)?;
let x4 = self.b4_1.forward(&self.b4_0.forward(&x3)?)?;
let x5 = self.b5.forward(&x4)?;
Ok((x2, x3, x5))
}
}
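// PAN-style feature pyramid neck: P5 is upsampled and fused with P4, then with P3
// on the top-down path, and the results are re-downsampled and fused again on the
// bottom-up path, yielding three outputs at strides 8, 16 and 32.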
#[derive(Debug)]
struct YoloV8Neck {
up: Upsample,
n1: C2f,
n2: C2f,
n3: ConvBlock,
n4: C2f,
n5: ConvBlock,
n6: C2f,
}
impl YoloV8Neck {
fn load(vb: VarBuilder, m: Multiples) -> Result<Self> {
let up = Upsample::new(2)?;
let (w, r, d) = (m.width, m.ratio, m.depth);
let n = (3. * d).round() as usize;
let n1 = C2f::load(
vb.pp("n1"),
(512. * w * (1. + r)) as usize,
(512. * w) as usize,
n,
false,
)?;
let n2 = C2f::load(
vb.pp("n2"),
(768. * w) as usize,
(256. * w) as usize,
n,
false,
)?;
let n3 = ConvBlock::load(
vb.pp("n3"),
(256. * w) as usize,
(256. * w) as usize,
3,
2,
Some(1),
)?;
let n4 = C2f::load(
vb.pp("n4"),
(768. * w) as usize,
(512. * w) as usize,
n,
false,
)?;
let n5 = ConvBlock::load(
vb.pp("n5"),
(512. * w) as usize,
(512. * w) as usize,
3,
2,
Some(1),
)?;
let n6 = C2f::load(
vb.pp("n6"),
(512. * w * (1. + r)) as usize,
(512. * w * r) as usize,
n,
false,
)?;
Ok(Self {
up,
n1,
n2,
n3,
n4,
n5,
n6,
})
}
fn forward(&self, p3: &Tensor, p4: &Tensor, p5: &Tensor) -> Result<(Tensor, Tensor, Tensor)> {
let x = self
.n1
.forward(&Tensor::cat(&[&self.up.forward(p5)?, p4], 1)?)?;
let head_1 = self
.n2
.forward(&Tensor::cat(&[&self.up.forward(&x)?, p3], 1)?)?;
let head_2 = self
.n4
.forward(&Tensor::cat(&[&self.n3.forward(&head_1)?, &x], 1)?)?;
let head_3 = self
.n6
.forward(&Tensor::cat(&[&self.n5.forward(&head_2)?, p5], 1)?)?;
Ok((head_1, head_2, head_3))
}
}
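// Anchor-free detection head: for each of the three scales, the `cv2` branch
// predicts the 4 * ch DFL box distributions and the `cv3` branch predicts the
// per-class logits; `no` is the number of outputs per anchor (nc + 4 * ch).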
#[derive(Debug)]
struct DetectionHead {
dfl: Dfl,
cv2: [(ConvBlock, ConvBlock, Conv2d); 3],
cv3: [(ConvBlock, ConvBlock, Conv2d); 3],
ch: usize,
no: usize,
}
#[derive(Debug)]
struct PoseHead {
detect: DetectionHead,
cv4: [(ConvBlock, ConvBlock, Conv2d); 3],
kpt: (usize, usize),
}
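// Build the anchor grid: one (x, y) cell-center (offset by `grid_cell_offset`)
// per spatial location of each of the three feature maps, together with a
// per-anchor stride tensor filled with s0, s1 or s2.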
fn make_anchors(
xs0: &Tensor,
xs1: &Tensor,
xs2: &Tensor,
(s0, s1, s2): (usize, usize, usize),
grid_cell_offset: f64,
) -> Result<(Tensor, Tensor)> {
let dev = xs0.device();
let mut anchor_points = vec![];
let mut stride_tensor = vec![];
for (xs, stride) in [(xs0, s0), (xs1, s1), (xs2, s2)] {
// xs is only used to extract the h and w dimensions.
let (_, _, h, w) = xs.dims4()?;
let sx = (Tensor::arange(0, w as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?;
let sy = (Tensor::arange(0, h as u32, dev)?.to_dtype(DType::F32)? + grid_cell_offset)?;
let sx = sx
.reshape((1, sx.elem_count()))?
.repeat((h, 1))?
.flatten_all()?;
let sy = sy
.reshape((sy.elem_count(), 1))?
.repeat((1, w))?
.flatten_all()?;
anchor_points.push(Tensor::stack(&[&sx, &sy], D::Minus1)?);
stride_tensor.push((Tensor::ones(h * w, DType::F32, dev)? * stride as f64)?);
}
let anchor_points = Tensor::cat(anchor_points.as_slice(), 0)?;
let stride_tensor = Tensor::cat(stride_tensor.as_slice(), 0)?.unsqueeze(1)?;
Ok((anchor_points, stride_tensor))
}
struct DetectionHeadOut {
pred: Tensor,
anchors: Tensor,
strides: Tensor,
}
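// Convert DFL distances (left/top and right/bottom offsets from an anchor point)
// into (center-x, center-y, width, height) boxes. Illustrative example: an anchor
// at (10, 10) with lt = (2, 3) and rb = (4, 1) gives corners (8, 7) and (14, 11),
// i.e. center (11, 9) and size (6, 4).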
fn dist2bbox(distance: &Tensor, anchor_points: &Tensor) -> Result<Tensor> {
let chunks = distance.chunk(2, 1)?;
let lt = &chunks[0];
let rb = &chunks[1];
let x1y1 = anchor_points.sub(lt)?;
let x2y2 = anchor_points.add(rb)?;
let c_xy = ((&x1y1 + &x2y2)? * 0.5)?;
let wh = (&x2y2 - &x1y1)?;
Tensor::cat(&[c_xy, wh], 1)
}
impl DetectionHead {
fn load(vb: VarBuilder, nc: usize, filters: (usize, usize, usize)) -> Result<Self> {
let ch = 16;
let dfl = Dfl::load(vb.pp("dfl"), ch)?;
let c1 = usize::max(filters.0, nc);
let c2 = usize::max(filters.0 / 4, ch * 4);
let cv3 = [
Self::load_cv3(vb.pp("cv3.0"), c1, nc, filters.0)?,
Self::load_cv3(vb.pp("cv3.1"), c1, nc, filters.1)?,
Self::load_cv3(vb.pp("cv3.2"), c1, nc, filters.2)?,
];
let cv2 = [
Self::load_cv2(vb.pp("cv2.0"), c2, ch, filters.0)?,
Self::load_cv2(vb.pp("cv2.1"), c2, ch, filters.1)?,
Self::load_cv2(vb.pp("cv2.2"), c2, ch, filters.2)?,
];
let no = nc + ch * 4;
Ok(Self {
dfl,
cv2,
cv3,
ch,
no,
})
}
fn load_cv3(
vb: VarBuilder,
c1: usize,
nc: usize,
filter: usize,
) -> Result<(ConvBlock, ConvBlock, Conv2d)> {
let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?;
let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?;
let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?;
Ok((block0, block1, conv))
}
fn load_cv2(
vb: VarBuilder,
c2: usize,
ch: usize,
filter: usize,
) -> Result<(ConvBlock, ConvBlock, Conv2d)> {
let block0 = ConvBlock::load(vb.pp("0"), filter, c2, 3, 1, None)?;
let block1 = ConvBlock::load(vb.pp("1"), c2, c2, 3, 1, None)?;
let conv = conv2d(c2, 4 * ch, 1, Default::default(), vb.pp("2"))?;
Ok((block0, block1, conv))
}
fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<DetectionHeadOut> {
let forward_cv = |xs, i: usize| {
let xs_2 = self.cv2[i].0.forward(xs)?;
let xs_2 = self.cv2[i].1.forward(&xs_2)?;
let xs_2 = self.cv2[i].2.forward(&xs_2)?;
let xs_3 = self.cv3[i].0.forward(xs)?;
let xs_3 = self.cv3[i].1.forward(&xs_3)?;
let xs_3 = self.cv3[i].2.forward(&xs_3)?;
Tensor::cat(&[&xs_2, &xs_3], 1)
};
let xs0 = forward_cv(xs0, 0)?;
let xs1 = forward_cv(xs1, 1)?;
let xs2 = forward_cv(xs2, 2)?;
let (anchors, strides) = make_anchors(&xs0, &xs1, &xs2, (8, 16, 32), 0.5)?;
let anchors = anchors.transpose(0, 1)?.unsqueeze(0)?;
let strides = strides.transpose(0, 1)?;
let reshape = |xs: &Tensor| {
let d = xs.dim(0)?;
let el = xs.elem_count();
xs.reshape((d, self.no, el / (d * self.no)))
};
let ys0 = reshape(&xs0)?;
let ys1 = reshape(&xs1)?;
let ys2 = reshape(&xs2)?;
let x_cat = Tensor::cat(&[ys0, ys1, ys2], 2)?;
let box_ = x_cat.i((.., ..self.ch * 4))?;
let cls = x_cat.i((.., self.ch * 4..))?;
let dbox = dist2bbox(&self.dfl.forward(&box_)?, &anchors)?;
let dbox = dbox.broadcast_mul(&strides)?;
let pred = Tensor::cat(&[dbox, candle_nn::ops::sigmoid(&cls)?], 1)?;
Ok(DetectionHeadOut {
pred,
anchors,
strides,
})
}
}
impl PoseHead {
// kpt: keypoints, (17, 3)
// nc: num-classes, 80
fn load(
vb: VarBuilder,
nc: usize,
kpt: (usize, usize),
filters: (usize, usize, usize),
) -> Result<Self> {
let detect = DetectionHead::load(vb.clone(), nc, filters)?;
let nk = kpt.0 * kpt.1;
let c4 = usize::max(filters.0 / 4, nk);
let cv4 = [
Self::load_cv4(vb.pp("cv4.0"), c4, nk, filters.0)?,
Self::load_cv4(vb.pp("cv4.1"), c4, nk, filters.1)?,
Self::load_cv4(vb.pp("cv4.2"), c4, nk, filters.2)?,
];
Ok(Self { detect, cv4, kpt })
}
fn load_cv4(
vb: VarBuilder,
c1: usize,
nc: usize,
filter: usize,
) -> Result<(ConvBlock, ConvBlock, Conv2d)> {
let block0 = ConvBlock::load(vb.pp("0"), filter, c1, 3, 1, None)?;
let block1 = ConvBlock::load(vb.pp("1"), c1, c1, 3, 1, None)?;
let conv = conv2d(c1, nc, 1, Default::default(), vb.pp("2"))?;
Ok((block0, block1, conv))
}
fn forward(&self, xs0: &Tensor, xs1: &Tensor, xs2: &Tensor) -> Result<Tensor> {
let d = self.detect.forward(xs0, xs1, xs2)?;
let forward_cv = |xs: &Tensor, i: usize| {
let (b_sz, _, h, w) = xs.dims4()?;
let xs = self.cv4[i].0.forward(xs)?;
let xs = self.cv4[i].1.forward(&xs)?;
let xs = self.cv4[i].2.forward(&xs)?;
xs.reshape((b_sz, self.kpt.0 * self.kpt.1, h * w))
};
let xs0 = forward_cv(xs0, 0)?;
let xs1 = forward_cv(xs1, 1)?;
let xs2 = forward_cv(xs2, 2)?;
let xs = Tensor::cat(&[xs0, xs1, xs2], D::Minus1)?;
let (b_sz, _nk, hw) = xs.dims3()?;
let xs = xs.reshape((b_sz, self.kpt.0, self.kpt.1, hw))?;
let ys01 = ((xs.i((.., .., 0..2))? * 2.)?.broadcast_add(&d.anchors)? - 0.5)?
.broadcast_mul(&d.strides)?;
let ys2 = candle_nn::ops::sigmoid(&xs.i((.., .., 2..3))?)?;
let ys = Tensor::cat(&[ys01, ys2], 2)?.flatten(1, 2)?;
Tensor::cat(&[d.pred, ys], 1)
}
}
#[derive(Debug)]
pub struct YoloV8 {
net: DarkNet,
fpn: YoloV8Neck,
head: DetectionHead,
}
impl YoloV8 {
pub fn load(vb: VarBuilder, m: Multiples, num_classes: usize) -> Result<Self> {
let net = DarkNet::load(vb.pp("net"), m)?;
let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?;
let head = DetectionHead::load(vb.pp("head"), num_classes, m.filters())?;
Ok(Self { net, fpn, head })
}
}
impl Module for YoloV8 {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (xs1, xs2, xs3) = self.net.forward(xs)?;
let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?;
Ok(self.head.forward(&xs1, &xs2, &xs3)?.pred)
}
}
#[derive(Debug)]
pub struct YoloV8Pose {
net: DarkNet,
fpn: YoloV8Neck,
head: PoseHead,
}
impl YoloV8Pose {
pub fn load(
vb: VarBuilder,
m: Multiples,
num_classes: usize,
kpt: (usize, usize),
) -> Result<Self> {
let net = DarkNet::load(vb.pp("net"), m)?;
let fpn = YoloV8Neck::load(vb.pp("fpn"), m)?;
let head = PoseHead::load(vb.pp("head"), num_classes, kpt, m.filters())?;
Ok(Self { net, fpn, head })
}
}
impl Module for YoloV8Pose {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (xs1, xs2, xs3) = self.net.forward(xs)?;
let (xs1, xs2, xs3) = self.fpn.forward(&xs1, &xs2, &xs3)?;
self.head.forward(&xs1, &xs2, &xs3)
}
}
#[derive(Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct KeyPoint {
pub x: f32,
pub y: f32,
pub mask: f32,
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Bbox {
pub xmin: f32,
pub ymin: f32,
pub xmax: f32,
pub ymax: f32,
pub confidence: f32,
pub keypoints: Vec<KeyPoint>,
}
// Intersection over union of two bounding boxes.
fn iou(b1: &Bbox, b2: &Bbox) -> f32 {
let b1_area = (b1.xmax - b1.xmin + 1.) * (b1.ymax - b1.ymin + 1.);
let b2_area = (b2.xmax - b2.xmin + 1.) * (b2.ymax - b2.ymin + 1.);
let i_xmin = b1.xmin.max(b2.xmin);
let i_xmax = b1.xmax.min(b2.xmax);
let i_ymin = b1.ymin.max(b2.ymin);
let i_ymax = b1.ymax.min(b2.ymax);
let i_area = (i_xmax - i_xmin + 1.).max(0.) * (i_ymax - i_ymin + 1.).max(0.);
i_area / (b1_area + b2_area - i_area)
}
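// Post-process the raw detection output. `pred` is expected to have shape
// (4 + num_classes, num_predictions): rows 0..4 hold the (center-x, center-y,
// width, height) box in model-input coordinates and the remaining rows hold the
// per-class confidences; boxes are rescaled to the original image size below.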
pub fn report_detect(
pred: &Tensor,
img: DynamicImage,
w: usize,
h: usize,
conf_threshold: f32,
iou_threshold: f32,
) -> Result<Vec<Vec<Bbox>>> {
let (pred_size, npreds) = pred.dims2()?;
let nclasses = pred_size - 4;
let conf_threshold = conf_threshold.clamp(0.0, 1.0);
let iou_threshold = iou_threshold.clamp(0.0, 1.0);
// The bounding boxes grouped by (maximum) class index.
let mut bboxes: Vec<Vec<Bbox>> = (0..nclasses).map(|_| vec![]).collect();
// Extract the bounding boxes for which confidence is above the threshold.
for index in 0..npreds {
let pred = Vec::<f32>::try_from(pred.i((.., index))?)?;
let confidence = *pred[4..].iter().max_by(|x, y| x.total_cmp(y)).unwrap();
if confidence > conf_threshold {
let mut class_index = 0;
for i in 0..nclasses {
if pred[4 + i] > pred[4 + class_index] {
class_index = i
}
}
if pred[class_index + 4] > 0. {
let bbox = Bbox {
xmin: pred[0] - pred[2] / 2.,
ymin: pred[1] - pred[3] / 2.,
xmax: pred[0] + pred[2] / 2.,
ymax: pred[1] + pred[3] / 2.,
confidence,
keypoints: vec![],
};
bboxes[class_index].push(bbox)
}
}
}
non_maximum_suppression(&mut bboxes, iou_threshold);
        // Log the detected boxes and rescale them to the original image size.
let (initial_h, initial_w) = (img.height() as f32, img.width() as f32);
let w_ratio = initial_w / w as f32;
let h_ratio = initial_h / h as f32;
for (class_index, bboxes_for_class) in bboxes.iter_mut().enumerate() {
for b in bboxes_for_class.iter_mut() {
crate::console_log!("{}: {:?}", crate::coco_classes::NAMES[class_index], b);
b.xmin = (b.xmin * w_ratio).clamp(0., initial_w - 1.);
b.ymin = (b.ymin * h_ratio).clamp(0., initial_h - 1.);
b.xmax = (b.xmax * w_ratio).clamp(0., initial_w - 1.);
b.ymax = (b.ymax * h_ratio).clamp(0., initial_h - 1.);
}
}
Ok(bboxes)
}
fn non_maximum_suppression(bboxes: &mut [Vec<Bbox>], threshold: f32) {
// Perform non-maximum suppression.
for bboxes_for_class in bboxes.iter_mut() {
bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap());
let mut current_index = 0;
for index in 0..bboxes_for_class.len() {
let mut drop = false;
for prev_index in 0..current_index {
let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]);
if iou > threshold {
drop = true;
break;
}
}
if !drop {
bboxes_for_class.swap(current_index, index);
current_index += 1;
}
}
bboxes_for_class.truncate(current_index);
}
}
pub fn report_pose(
pred: &Tensor,
img: DynamicImage,
w: usize,
h: usize,
confidence_threshold: f32,
nms_threshold: f32,
) -> Result<Vec<Bbox>> {
let (pred_size, npreds) = pred.dims2()?;
if pred_size != 17 * 3 + 4 + 1 {
candle::bail!("unexpected pred-size {pred_size}");
}
let mut bboxes = vec![];
// Extract the bounding boxes for which confidence is above the threshold.
for index in 0..npreds {
let pred = Vec::<f32>::try_from(pred.i((.., index))?)?;
let confidence = pred[4];
if confidence > confidence_threshold {
let keypoints = (0..17)
.map(|i| KeyPoint {
x: pred[3 * i + 5],
y: pred[3 * i + 6],
mask: pred[3 * i + 7],
})
.collect::<Vec<_>>();
let bbox = Bbox {
xmin: pred[0] - pred[2] / 2.,
ymin: pred[1] - pred[3] / 2.,
xmax: pred[0] + pred[2] / 2.,
ymax: pred[1] + pred[3] / 2.,
confidence,
keypoints,
};
bboxes.push(bbox)
}
}
let mut bboxes = vec![bboxes];
non_maximum_suppression(&mut bboxes, nms_threshold);
let mut bboxes = bboxes.into_iter().next().unwrap();
let (initial_h, initial_w) = (img.height() as f32, img.width() as f32);
let w_ratio = initial_w / w as f32;
let h_ratio = initial_h / h as f32;
for b in bboxes.iter_mut() {
crate::console_log!("detected {b:?}");
b.xmin = (b.xmin * w_ratio).clamp(0., initial_w - 1.);
b.ymin = (b.ymin * h_ratio).clamp(0., initial_h - 1.);
b.xmax = (b.xmax * w_ratio).clamp(0., initial_w - 1.);
b.ymax = (b.ymax * h_ratio).clamp(0., initial_h - 1.);
for kp in b.keypoints.iter_mut() {
kp.x = (kp.x * w_ratio).clamp(0., initial_w - 1.);
kp.y = (kp.y * h_ratio).clamp(0., initial_h - 1.);
}
}
Ok(bboxes)
}
| 2 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo | hf_public_repos/candle/candle-wasm-examples/yolo/src/worker.rs | use crate::model::{report_detect, report_pose, Bbox, Multiples, YoloV8, YoloV8Pose};
use candle::{DType, Device, Result, Tensor};
use candle_nn::{Module, VarBuilder};
use serde::{Deserialize, Serialize};
use wasm_bindgen::prelude::*;
use yew_agent::{HandlerId, Public, WorkerLink};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
}
#[macro_export]
macro_rules! console_log {
// Note that this uses the `log` function imported above via `wasm_bindgen`.
($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string()))
}
// Communication to the worker happens through bincode; the model weights and configs
// are fetched on the main thread and transferred via the following structure.
#[derive(Serialize, Deserialize)]
pub struct ModelData {
pub weights: Vec<u8>,
pub model_size: String,
}
#[derive(Serialize, Deserialize)]
pub struct RunData {
pub image_data: Vec<u8>,
pub conf_threshold: f32,
pub iou_threshold: f32,
}
pub struct Model {
model: YoloV8,
}
impl Model {
pub fn run(
&self,
image_data: Vec<u8>,
conf_threshold: f32,
iou_threshold: f32,
) -> Result<Vec<Vec<Bbox>>> {
console_log!("image data: {}", image_data.len());
let image_data = std::io::Cursor::new(image_data);
let original_image = image::ImageReader::new(image_data)
.with_guessed_format()?
.decode()
.map_err(candle::Error::wrap)?;
let (width, height) = {
let w = original_image.width() as usize;
let h = original_image.height() as usize;
if w < h {
let w = w * 640 / h;
// Sizes have to be divisible by 32.
(w / 32 * 32, 640)
} else {
let h = h * 640 / w;
(640, h / 32 * 32)
}
};
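        // Illustrative example: a 1280x960 input is resized to 640x480 so that the
        // longer side is 640 and both sides are multiples of 32.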
let image_t = {
let img = original_image.resize_exact(
width as u32,
height as u32,
image::imageops::FilterType::CatmullRom,
);
let data = img.to_rgb8().into_raw();
Tensor::from_vec(
data,
(img.height() as usize, img.width() as usize, 3),
&Device::Cpu,
)?
.permute((2, 0, 1))?
};
let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?;
let predictions = self.model.forward(&image_t)?.squeeze(0)?;
console_log!("generated predictions {predictions:?}");
let bboxes = report_detect(
&predictions,
original_image,
width,
height,
conf_threshold,
iou_threshold,
)?;
Ok(bboxes)
}
pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> {
let multiples = match model_size {
"n" => Multiples::n(),
"s" => Multiples::s(),
"m" => Multiples::m(),
"l" => Multiples::l(),
"x" => Multiples::x(),
_ => Err(candle::Error::Msg(
"invalid model size: must be n, s, m, l or x".to_string(),
))?,
};
let dev = &Device::Cpu;
let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?;
let model = YoloV8::load(vb, multiples, 80)?;
Ok(Self { model })
}
pub fn load(md: ModelData) -> Result<Self> {
        Self::load_(md.weights, &md.model_size)
}
}
pub struct ModelPose {
model: YoloV8Pose,
}
impl ModelPose {
pub fn run(
&self,
image_data: Vec<u8>,
conf_threshold: f32,
iou_threshold: f32,
) -> Result<Vec<Bbox>> {
console_log!("image data: {}", image_data.len());
let image_data = std::io::Cursor::new(image_data);
let original_image = image::ImageReader::new(image_data)
.with_guessed_format()?
.decode()
.map_err(candle::Error::wrap)?;
let (width, height) = {
let w = original_image.width() as usize;
let h = original_image.height() as usize;
if w < h {
let w = w * 640 / h;
// Sizes have to be divisible by 32.
(w / 32 * 32, 640)
} else {
let h = h * 640 / w;
(640, h / 32 * 32)
}
};
let image_t = {
let img = original_image.resize_exact(
width as u32,
height as u32,
image::imageops::FilterType::CatmullRom,
);
let data = img.to_rgb8().into_raw();
Tensor::from_vec(
data,
(img.height() as usize, img.width() as usize, 3),
&Device::Cpu,
)?
.permute((2, 0, 1))?
};
let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?;
let predictions = self.model.forward(&image_t)?.squeeze(0)?;
console_log!("generated predictions {predictions:?}");
let bboxes = report_pose(
&predictions,
original_image,
width,
height,
conf_threshold,
iou_threshold,
)?;
Ok(bboxes)
}
pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> {
let multiples = match model_size {
"n" => Multiples::n(),
"s" => Multiples::s(),
"m" => Multiples::m(),
"l" => Multiples::l(),
"x" => Multiples::x(),
_ => Err(candle::Error::Msg(
"invalid model size: must be n, s, m, l or x".to_string(),
))?,
};
let dev = &Device::Cpu;
let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?;
let model = YoloV8Pose::load(vb, multiples, 1, (17, 3))?;
Ok(Self { model })
}
pub fn load(md: ModelData) -> Result<Self> {
        Self::load_(md.weights, &md.model_size)
}
}
pub struct Worker {
link: WorkerLink<Self>,
model: Option<Model>,
}
#[derive(Serialize, Deserialize)]
pub enum WorkerInput {
ModelData(ModelData),
RunData(RunData),
}
#[derive(Serialize, Deserialize)]
pub enum WorkerOutput {
ProcessingDone(std::result::Result<Vec<Vec<Bbox>>, String>),
WeightsLoaded,
}
impl yew_agent::Worker for Worker {
type Input = WorkerInput;
type Message = ();
type Output = std::result::Result<WorkerOutput, String>;
type Reach = Public<Self>;
fn create(link: WorkerLink<Self>) -> Self {
Self { link, model: None }
}
fn update(&mut self, _msg: Self::Message) {
// no messaging
}
fn handle_input(&mut self, msg: Self::Input, id: HandlerId) {
let output = match msg {
WorkerInput::ModelData(md) => match Model::load(md) {
Ok(model) => {
self.model = Some(model);
Ok(WorkerOutput::WeightsLoaded)
}
Err(err) => Err(format!("model creation error {err:?}")),
},
WorkerInput::RunData(rd) => match &mut self.model {
None => Err("model has not been set yet".to_string()),
Some(model) => {
let result = model
.run(rd.image_data, rd.conf_threshold, rd.iou_threshold)
.map_err(|e| e.to_string());
Ok(WorkerOutput::ProcessingDone(result))
}
},
};
self.link.respond(id, output);
}
fn name_of_resource() -> &'static str {
"worker.js"
}
fn resource_path_is_relative() -> bool {
true
}
}
| 3 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo | hf_public_repos/candle/candle-wasm-examples/yolo/src/app.rs | use crate::console_log;
use crate::worker::{ModelData, RunData, Worker, WorkerInput, WorkerOutput};
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::JsFuture;
use yew::{html, Component, Context, Html};
use yew_agent::{Bridge, Bridged};
async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> {
use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response};
let window = web_sys::window().ok_or("window")?;
let opts = RequestInit::new();
opts.set_method("GET");
opts.set_mode(RequestMode::Cors);
opts.set_cache(RequestCache::NoCache);
let request = Request::new_with_str_and_init(url, &opts)?;
let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;
// `resp_value` is a `Response` object.
assert!(resp_value.is_instance_of::<Response>());
let resp: Response = resp_value.dyn_into()?;
let data = JsFuture::from(resp.blob()?).await?;
let blob = web_sys::Blob::from(data);
let array_buffer = JsFuture::from(blob.array_buffer()).await?;
let data = js_sys::Uint8Array::new(&array_buffer).to_vec();
Ok(data)
}
pub enum Msg {
Refresh,
Run,
UpdateStatus(String),
SetModel(ModelData),
WorkerIn(WorkerInput),
WorkerOut(Result<WorkerOutput, String>),
}
pub struct CurrentDecode {
start_time: Option<f64>,
}
pub struct App {
status: String,
loaded: bool,
generated: String,
current_decode: Option<CurrentDecode>,
worker: Box<dyn Bridge<Worker>>,
}
async fn model_data_load() -> Result<ModelData, JsValue> {
let weights = fetch_url("yolov8s.safetensors").await?;
let model_size = "s".to_string();
console_log!("loaded weights {}", weights.len());
Ok(ModelData {
weights,
model_size,
})
}
fn performance_now() -> Option<f64> {
let window = web_sys::window()?;
let performance = window.performance()?;
Some(performance.now() / 1000.)
}
fn draw_bboxes(bboxes: Vec<Vec<crate::model::Bbox>>) -> Result<(), JsValue> {
let document = web_sys::window().unwrap().document().unwrap();
let canvas = match document.get_element_by_id("canvas") {
Some(canvas) => canvas,
None => return Err("no canvas".into()),
};
let canvas: web_sys::HtmlCanvasElement = canvas.dyn_into::<web_sys::HtmlCanvasElement>()?;
let context = canvas
.get_context("2d")?
.ok_or("no 2d")?
.dyn_into::<web_sys::CanvasRenderingContext2d>()?;
let image_html_element = document.get_element_by_id("bike-img");
let image_html_element = match image_html_element {
Some(data) => data,
None => return Err("no bike-img".into()),
};
let image_html_element = image_html_element.dyn_into::<web_sys::HtmlImageElement>()?;
canvas.set_width(image_html_element.natural_width());
canvas.set_height(image_html_element.natural_height());
context.draw_image_with_html_image_element(&image_html_element, 0., 0.)?;
context.set_stroke_style(&JsValue::from("#0dff9a"));
for (class_index, bboxes_for_class) in bboxes.iter().enumerate() {
for b in bboxes_for_class.iter() {
let name = crate::coco_classes::NAMES[class_index];
context.stroke_rect(
b.xmin as f64,
b.ymin as f64,
(b.xmax - b.xmin) as f64,
(b.ymax - b.ymin) as f64,
);
if let Ok(metrics) = context.measure_text(name) {
let width = metrics.width();
context.set_fill_style(&"#3c8566".into());
context.fill_rect(b.xmin as f64 - 2., b.ymin as f64 - 12., width + 4., 14.);
context.set_fill_style(&"#e3fff3".into());
context.fill_text(name, b.xmin as f64, b.ymin as f64 - 2.)?
}
}
}
Ok(())
}
impl Component for App {
type Message = Msg;
type Properties = ();
fn create(ctx: &Context<Self>) -> Self {
let status = "loading weights".to_string();
let cb = {
let link = ctx.link().clone();
move |e| link.send_message(Self::Message::WorkerOut(e))
};
let worker = Worker::bridge(std::rc::Rc::new(cb));
Self {
status,
generated: String::new(),
current_decode: None,
worker,
loaded: false,
}
}
fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) {
if first_render {
ctx.link().send_future(async {
match model_data_load().await {
Err(err) => {
let status = format!("{err:?}");
Msg::UpdateStatus(status)
}
Ok(model_data) => Msg::SetModel(model_data),
}
});
}
}
fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
match msg {
Msg::SetModel(md) => {
self.status = "weights loaded successfully!".to_string();
self.loaded = true;
console_log!("loaded weights");
self.worker.send(WorkerInput::ModelData(md));
true
}
Msg::Run => {
if self.current_decode.is_some() {
self.status = "already processing some image at the moment".to_string()
} else {
let start_time = performance_now();
self.current_decode = Some(CurrentDecode { start_time });
self.status = "processing...".to_string();
self.generated.clear();
ctx.link().send_future(async {
match fetch_url("bike.jpeg").await {
Err(err) => {
let status = format!("{err:?}");
Msg::UpdateStatus(status)
}
Ok(image_data) => Msg::WorkerIn(WorkerInput::RunData(RunData {
image_data,
conf_threshold: 0.5,
iou_threshold: 0.5,
})),
}
});
}
true
}
Msg::WorkerOut(output) => {
match output {
Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(),
Ok(WorkerOutput::ProcessingDone(Err(err))) => {
self.status = format!("error in worker process: {err}");
self.current_decode = None
}
Ok(WorkerOutput::ProcessingDone(Ok(bboxes))) => {
let mut content = Vec::new();
for (class_index, bboxes_for_class) in bboxes.iter().enumerate() {
for b in bboxes_for_class.iter() {
content.push(format!(
"bbox {}: xs {:.0}-{:.0} ys {:.0}-{:.0}",
crate::coco_classes::NAMES[class_index],
b.xmin,
b.xmax,
b.ymin,
b.ymax
))
}
}
self.generated = content.join("\n");
let dt = self.current_decode.as_ref().and_then(|current_decode| {
current_decode.start_time.and_then(|start_time| {
performance_now().map(|stop_time| stop_time - start_time)
})
});
self.status = match dt {
None => "processing succeeded!".to_string(),
Some(dt) => format!("processing succeeded in {:.2}s", dt,),
};
self.current_decode = None;
if let Err(err) = draw_bboxes(bboxes) {
self.status = format!("{err:?}")
}
}
Err(err) => {
self.status = format!("error in worker {err:?}");
}
}
true
}
Msg::WorkerIn(inp) => {
self.worker.send(inp);
true
}
Msg::UpdateStatus(status) => {
self.status = status;
true
}
Msg::Refresh => true,
}
}
fn view(&self, ctx: &Context<Self>) -> Html {
html! {
<div style="margin: 2%;">
<div><p>{"Running an object detection model in the browser using rust/wasm with "}
<a href="https://github.com/huggingface/candle" target="_blank">{"candle!"}</a>
</p>
<p>{"Once the weights have loaded, click on the run button to process an image."}</p>
<p><img id="bike-img" src="bike.jpeg"/></p>
<p>{"Source: "}<a href="https://commons.wikimedia.org/wiki/File:V%C3%A9lo_parade_-_V%C3%A9lorution_-_bike_critical_mass.JPG">{"wikimedia"}</a></p>
</div>
{
if self.loaded{
html!(<button class="button" onclick={ctx.link().callback(move |_| Msg::Run)}> { "run" }</button>)
}else{
html! { <progress id="progress-bar" aria-label="Loading weights..."></progress> }
}
}
<br/ >
<h3>
{&self.status}
</h3>
{
if self.current_decode.is_some() {
html! { <progress id="progress-bar" aria-label="generating…"></progress> }
} else {
html! {}
}
}
<div>
<canvas id="canvas" height="150" width="150"></canvas>
</div>
<blockquote>
<p> { self.generated.chars().map(|c|
if c == '\r' || c == '\n' {
html! { <br/> }
} else {
html! { {c} }
}).collect::<Html>()
} </p>
</blockquote>
</div>
}
}
}
| 4 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo/src | hf_public_repos/candle/candle-wasm-examples/yolo/src/bin/m.rs | use candle_wasm_example_yolo::coco_classes;
use candle_wasm_example_yolo::model::Bbox;
use candle_wasm_example_yolo::worker::Model as M;
use candle_wasm_example_yolo::worker::ModelPose as P;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
pub struct Model {
inner: M,
}
#[wasm_bindgen]
impl Model {
#[wasm_bindgen(constructor)]
pub fn new(data: Vec<u8>, model_size: &str) -> Result<Model, JsError> {
let inner = M::load_(data, model_size)?;
Ok(Self { inner })
}
#[wasm_bindgen]
pub fn run(
&self,
image: Vec<u8>,
conf_threshold: f32,
iou_threshold: f32,
) -> Result<String, JsError> {
let bboxes = self.inner.run(image, conf_threshold, iou_threshold)?;
let mut detections: Vec<(String, Bbox)> = vec![];
for (class_index, bboxes_for_class) in bboxes.into_iter().enumerate() {
for b in bboxes_for_class.into_iter() {
detections.push((coco_classes::NAMES[class_index].to_string(), b));
}
}
let json = serde_json::to_string(&detections)?;
Ok(json)
}
}
#[wasm_bindgen]
pub struct ModelPose {
inner: P,
}
#[wasm_bindgen]
impl ModelPose {
#[wasm_bindgen(constructor)]
pub fn new(data: Vec<u8>, model_size: &str) -> Result<ModelPose, JsError> {
let inner = P::load_(data, model_size)?;
Ok(Self { inner })
}
#[wasm_bindgen]
pub fn run(
&self,
image: Vec<u8>,
conf_threshold: f32,
iou_threshold: f32,
) -> Result<String, JsError> {
let bboxes = self.inner.run(image, conf_threshold, iou_threshold)?;
let json = serde_json::to_string(&bboxes)?;
Ok(json)
}
}
fn main() {}
| 5 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo/src | hf_public_repos/candle/candle-wasm-examples/yolo/src/bin/worker.rs | use yew_agent::PublicWorker;
fn main() {
console_error_panic_hook::set_once();
candle_wasm_example_yolo::Worker::register();
}
| 6 |
0 | hf_public_repos/candle/candle-wasm-examples/yolo/src | hf_public_repos/candle/candle-wasm-examples/yolo/src/bin/app.rs | fn main() {
wasm_logger::init(wasm_logger::Config::new(log::Level::Trace));
console_error_panic_hook::set_once();
yew::Renderer::<candle_wasm_example_yolo::App>::new().render();
}
| 7 |
0 | hf_public_repos/candle/candle-wasm-examples | hf_public_repos/candle/candle-wasm-examples/blip/index.html | <!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<style>
@import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
html,
body {
font-family: "Source Sans 3", sans-serif;
}
</style>
<title>Candle Blip Image Captioning Demo</title>
<script src="https://cdn.tailwindcss.com"></script>
<script type="module" src="./code.js"></script>
<script type="module">
const MODELS = {
blip_image_quantized_q4k: {
base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/",
model: "blip-image-captioning-large-q4k.gguf",
config: "config.json",
tokenizer: "tokenizer.json",
quantized: true,
size: "271 MB",
},
blip_image_quantized_q80: {
base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/",
model: "blip-image-captioning-large-q80.gguf",
config: "config.json",
tokenizer: "tokenizer.json",
quantized: true,
size: "505 MB",
},
blip_image_large: {
base_url:
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/refs%2Fpr%2F18/",
model: "model.safetensors",
config: "config.json",
tokenizer: "tokenizer.json",
quantized: false,
size: "1.88 GB",
},
};
const blipWorker = new Worker("./blipWorker.js", {
type: "module",
});
const outputStatusEl = document.querySelector("#output-status");
const outputCaptionEl = document.querySelector("#output-caption");
const modelSelectEl = document.querySelector("#model");
const clearBtn = document.querySelector("#clear-btn");
const fileUpload = document.querySelector("#file-upload");
const dropArea = document.querySelector("#drop-area");
const imagesExamples = document.querySelector("#image-select");
const canvas = document.querySelector("#canvas");
const ctxCanvas = canvas.getContext("2d");
let isCaptioning = false;
let currentImageURL = null;
clearBtn.addEventListener("click", () => {
clearImageCanvas();
});
modelSelectEl.addEventListener("change", () => {
if (currentImageURL) {
runInference(currentImageURL);
}
});
//add event listener to file input
fileUpload.addEventListener("input", async (e) => {
const target = e.target;
if (target.files.length > 0) {
const href = URL.createObjectURL(target.files[0]);
clearImageCanvas();
await drawImageCanvas(href);
runInference(href);
}
});
// add event listener to drop-area
dropArea.addEventListener("dragenter", (e) => {
e.preventDefault();
dropArea.classList.add("border-blue-700");
});
dropArea.addEventListener("dragleave", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
});
dropArea.addEventListener("dragover", (e) => {
e.preventDefault();
});
dropArea.addEventListener("drop", async (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
const url = e.dataTransfer.getData("text/uri-list");
const files = e.dataTransfer.files;
if (files.length > 0) {
const href = URL.createObjectURL(files[0]);
clearImageCanvas();
await drawImageCanvas(href);
runInference(href);
} else if (url) {
clearImageCanvas();
await drawImageCanvas(url);
runInference(url);
}
});
imagesExamples.addEventListener("click", async (e) => {
if (isCaptioning) {
return;
}
const target = e.target;
if (target.nodeName === "IMG") {
const href = target.src;
clearImageCanvas();
await drawImageCanvas(href);
runInference(href);
}
});
function clearImageCanvas() {
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
isCaptioning = false;
clearBtn.disabled = true;
canvas.parentElement.style.height = "auto";
outputStatusEl.hidden = false;
outputCaptionEl.hidden = true;
outputStatusEl.innerText = "Please select an image";
currentImageURL = null;
}
async function drawImageCanvas(imgURL) {
if (!imgURL) {
throw new Error("No image URL provided");
}
return new Promise((resolve, reject) => {
        ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
canvas.width = img.width;
canvas.height = img.height;
ctxCanvas.drawImage(img, 0, 0);
canvas.parentElement.style.height = canvas.offsetHeight + "px";
clearBtn.disabled = false;
resolve(img);
};
        img.onerror = () => reject(new Error("Failed to load the image"));
        img.src = imgURL;
currentImageURL = imgURL;
});
}
document.addEventListener("DOMContentLoaded", () => {
for (const [id, model] of Object.entries(MODELS)) {
const option = document.createElement("option");
option.value = id;
option.innerText = `${id} (${model.size})`;
modelSelectEl.appendChild(option);
}
});
async function getImageCaption(
worker,
weightsURL,
tokenizerURL,
configURL,
modelID,
imageURL,
quantized,
updateStatus = null
) {
return new Promise((resolve, reject) => {
worker.postMessage({
weightsURL,
tokenizerURL,
configURL,
modelID,
imageURL,
quantized,
});
function messageHandler(event) {
if ("error" in event.data) {
worker.removeEventListener("message", messageHandler);
reject(new Error(event.data.error));
}
if (event.data.status === "complete") {
worker.removeEventListener("message", messageHandler);
resolve(event.data);
}
if (updateStatus) updateStatus(event.data);
}
worker.addEventListener("message", messageHandler);
});
}
function updateStatus(data) {
if (data.status === "status") {
outputStatusEl.innerText = data.message;
}
}
async function runInference(imageURL) {
if (isCaptioning || !imageURL) {
alert("Please select an image first");
return;
}
outputStatusEl.hidden = false;
outputCaptionEl.hidden = true;
clearBtn.disabled = true;
modelSelectEl.disabled = true;
isCaptioning = true;
const selectedModel = modelSelectEl.value;
const model = MODELS[selectedModel];
const weightsURL = `${model.base_url}${model.model}`;
const tokenizerURL = `${model.base_url}${model.tokenizer}`;
const configURL = `${model.base_url}${model.config}`;
const quantized = model.quantized;
try {
const time = performance.now();
const caption = await getImageCaption(
blipWorker,
weightsURL,
tokenizerURL,
configURL,
selectedModel,
imageURL,
quantized,
updateStatus
);
outputStatusEl.hidden = true;
outputCaptionEl.hidden = false;
const totalTime = ((performance.now() - time)/1000).toFixed(2);
outputCaptionEl.innerHTML = `${
caption.output
}<br/><span class="text-xs">Inference time: ${totalTime} s</span>`;
} catch (err) {
console.error(err);
outputStatusEl.hidden = false;
outputCaptionEl.hidden = true;
outputStatusEl.innerText = err.message;
}
clearBtn.disabled = false;
modelSelectEl.disabled = false;
isCaptioning = false;
}
</script>
</head>
<body class="container max-w-4xl mx-auto p-4">
<main class="grid grid-cols-1 gap-5 relative">
<span class="absolute text-5xl -ml-[1em]"> 🕯️ </span>
<div>
<h1 class="text-5xl font-bold">Candle BLIP Image Captioning</h1>
<h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
<p class="max-w-lg">
<a
href="https://huggingface.co/Salesforce/blip-image-captioning-large"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>BLIP Image Captioning
</a>
running in the browser using
<a
href="https://github.com/huggingface/candle/"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>Candle</a
>, a minimalist ML framework for Rust.
</p>
<p class="text-xs max-w-lg py-2">
<b>Note:</b>
          Image captioning with the smallest model takes roughly 50 seconds;
          this will vary depending on your machine and the model size.
</p>
</div>
<div>
<label for="model" class="font-medium block">Models Options: </label>
<select
id="model"
class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max"
></select>
</div>
<!-- drag and drop area -->
<div class="grid gap-4 sm:grid-cols-2 py-4">
<div class="relative max-w-lg">
<div
class="absolute w-full bottom-full flex justify-between items-center"
>
<div class="flex gap-2 w-full">
<button
id="clear-btn"
disabled
title="Clear Image"
class="ml-auto text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center"
>
<svg
class=""
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 13 12"
height="1em"
>
<path
d="M1.6.7 12 11.1M12 .7 1.6 11.1"
stroke="#2E3036"
stroke-width="2"
/>
</svg>
</button>
</div>
</div>
<div
id="drop-area"
class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden"
>
<div
class="flex flex-col items-center justify-center space-y-1 text-center"
>
<svg
width="25"
height="25"
viewBox="0 0 25 25"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
fill="#000"
/>
</svg>
<div class="flex text-sm text-gray-600">
<label
for="file-upload"
class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"
>
                  <span>Drag and drop your image here</span>
<span class="block text-xs">or</span>
<span class="block text-xs">Click to upload</span>
</label>
</div>
<input
id="file-upload"
name="file-upload"
type="file"
class="sr-only"
/>
</div>
<canvas
id="canvas"
class="absolute pointer-events-none w-full"
></canvas>
</div>
</div>
<div class="">
<div
class="h-full bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"
>
<p
id="output-caption"
class="m-auto text-xl text-center p-2"
hidden
></p>
<span id="output-status" class="m-auto font-light">
Please select an image
</span>
</div>
</div>
</div>
<div>
<div
class="flex gap-3 items-center overflow-x-scroll"
id="image-select"
>
<h3 class="font-medium">Examples:</h3>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
</div>
</div>
</main>
</body>
</html>
| 8 |
0 | hf_public_repos/candle/candle-wasm-examples | hf_public_repos/candle/candle-wasm-examples/blip/Cargo.toml | [package]
name = "candle-wasm-example-blip"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
[dependencies]
candle = { workspace = true }
candle-nn = { workspace = true }
candle-transformers = { workspace = true }
tokenizers = { workspace = true, features = ["unstable_wasm"] }
num-traits = { workspace = true }
# App crates.
anyhow = { workspace = true }
byteorder = { workspace = true }
getrandom = { version = "0.2", features = ["js"] }
image = { workspace = true }
log = { workspace = true }
safetensors = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
# Wasm specific crates.
console_error_panic_hook = "0.1.7"
wasm-bindgen = "0.2.87"
js-sys = "0.3.64"
| 9 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/app/main.py | import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import (
Pipeline,
TabularClassificationPipeline,
TabularRegressionPipeline,
TextClassificationPipeline,
)
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
# IMPLEMENT_THIS: Add your implemented tasks here!
"tabular-classification": TabularClassificationPipeline,
"tabular-regression": TabularRegressionPipeline,
"text-classification": TextClassificationPipeline,
}
def get_pipeline(task=None, model_id=None) -> Pipeline:
task = task or os.environ["TASK"]
model_id = model_id or os.environ["MODEL_ID"]
if task not in ALLOWED_TASKS:
raise EnvironmentError(
f"{task} is not a valid pipeline for model : {model_id} ({','.join(ALLOWED_TASKS.keys())})"
)
return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
        # It's okay to fail here; the exception will be shown to the user later.
pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
        # It's okay to fail here; the exception will be shown to the user later.
pass
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/common.py | import json
import logging
import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any
import joblib
import skops.io as sio
from app.pipelines import Pipeline
from huggingface_hub import snapshot_download
logger = logging.getLogger(__name__)
DEFAULT_FILENAME = "sklearn_model.joblib"
class SklearnBasePipeline(Pipeline):
"""Base class for sklearn-based inference pipelines
    Concrete implementations should provide:
    - `_get_output`: Method to generate the model predictions
    - `__call__` (optional): Override only to add a docstring and type
      annotations; it should delegate to the base class ``__call__``, which
      handles errors and warnings.
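    A minimal sketch of a concrete subclass (illustrative only; the class name
    and task are hypothetical, not part of this repository):
        class MyTextPipeline(SklearnBasePipeline):
            def _get_output(self, inputs: str) -> list:
                # `self.model` is the estimator loaded by the base class.
                return self.model.predict([inputs]).tolist()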
"""
def __init__(self, model_id: str):
cached_folder = snapshot_download(repo_id=model_id)
self._load_warnings = []
self._load_exception = None
try:
with open(Path(cached_folder) / "config.json") as f:
# this is the default path for configuration of a scikit-learn
# project. If the project is created using `skops`, it should have
# this file.
config = json.load(f)
except Exception:
config = dict()
warnings.warn("`config.json` does not exist or is invalid.")
self.model_file = (
config.get("sklearn", {}).get("model", {}).get("file", DEFAULT_FILENAME)
)
self.model_format = config.get("sklearn", {}).get("model_format", "pickle")
try:
with warnings.catch_warnings(record=True) as record:
if self.model_format == "pickle":
self.model = joblib.load(
open(Path(cached_folder) / self.model_file, "rb")
)
elif self.model_format == "skops":
self.model = sio.load(
file=Path(cached_folder) / self.model_file, trusted=True
)
if len(record) > 0:
# if there's a warning while loading the model, we save it so
# that it can be raised to the user when __call__ is called.
self._load_warnings += record
except Exception as e:
# if there is an exception while loading the model, we save it to
# raise the write error when __call__ is called.
self._load_exception = e
# use column names from the config file if available, to give the data
# to the model in the right order.
self.columns = config.get("sklearn", {}).get("columns", None)
@abstractmethod
def _get_output(self, inputs: Any) -> Any:
raise NotImplementedError(
"Implement this method to get the model output (prediction)"
)
def __call__(self, inputs: Any) -> Any:
"""Handle call for getting the model prediction
This method is responsible for handling all possible errors and
warnings. To get the actual prediction, implement the `_get_output`
method.
The types of the inputs and output depend on the specific task being
implemented.
"""
if self._load_exception:
# there has been an error while loading the model. We need to raise
# that, and can't call predict on the model.
raise ValueError(
"An error occurred while loading the model: "
f"{str(self._load_exception)}"
)
_warnings = []
if self.columns:
# TODO: we should probably warn if columns are not configured, we
# really do need them.
given_cols = set(inputs["data"].keys())
expected = set(self.columns)
extra = given_cols - expected
missing = expected - given_cols
if extra:
_warnings.append(
f"The following columns were given but not expected: {extra}"
)
if missing:
_warnings.append(
f"The following columns were expected but not given: {missing}"
)
exception = None
try:
with warnings.catch_warnings(record=True) as record:
res = self._get_output(inputs)
except Exception as e:
exception = e
for warning in record:
_warnings.append(f"{warning.category.__name__}({warning.message})")
for warning in self._load_warnings:
_warnings.append(f"{warning.category.__name__}({warning.message})")
if _warnings:
for warning in _warnings:
logger.warning(warning)
if not exception:
# we raise an error if there are any warnings, so that routes.py
# can catch and return a non 200 status code.
error = {
"error": "There were warnings while running the model.",
"output": res,
"warnings": _warnings, # see issue #96
}
raise ValueError(json.dumps(error))
else:
# if there was an exception, we raise it so that routes.py can
# catch and return a non 200 status code.
raise exception
if exception:
raise exception
return res
| 1 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/tabular_regression.py | from app.pipelines.tabular_classification import TabularClassificationPipeline
class TabularRegressionPipeline(TabularClassificationPipeline):
# The actual work done by the pipeline is identical
pass
| 2 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
@abstractmethod
def __init__(self, model_id: str):
raise NotImplementedError("Pipelines should implement an __init__ method")
@abstractmethod
def __call__(self, inputs: Any) -> Any:
raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
pass
| 3 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.tabular_classification import TabularClassificationPipeline
from app.pipelines.tabular_regression import TabularRegressionPipeline
from app.pipelines.text_classification import TextClassificationPipeline
| 4 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/text_classification.py | from typing import Dict, List
from app.pipelines.common import SklearnBasePipeline
class TextClassificationPipeline(SklearnBasePipeline):
def _get_output(self, inputs: str) -> List[Dict[str, float]]:
res = []
for i, c in enumerate(self.model.predict_proba([inputs]).tolist()[0]):
res.append({"label": str(self.model.classes_[i]), "score": c})
return [res]
| 5 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/tabular_classification.py | from typing import Dict, List, Union
import pandas as pd
from app.pipelines.common import SklearnBasePipeline
class TabularClassificationPipeline(SklearnBasePipeline):
def _get_output(
self, inputs: Dict[str, Dict[str, List[Union[str, float]]]]
) -> List[Union[str, float]]:
# We convert the inputs to a pandas DataFrame, and use self.columns
# to order the columns in the order they're expected, ignore extra
# columns given if any, and put NaN for missing columns.
data = pd.DataFrame(inputs["data"], columns=self.columns)
res = self.model.predict(data).tolist()
return res
| 6 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
def test_can_build_docker_image(self):
with cd(os.path.dirname(os.path.dirname(__file__))):
subprocess.check_output(["docker", "build", "."])
| 7 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_api.py | import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
TESTABLE_MODELS = {
"tabular-classification": [
"skops-tests/iris-sklearn-1.0-logistic_regression-with-config-skops",
"skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-skops",
"skops-tests/iris-sklearn-latest-logistic_regression-with-config-skops",
"skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-skops",
"skops-tests/iris-sklearn-1.0-logistic_regression-with-config-pickle",
"skops-tests/iris-sklearn-1.0-logistic_regression-without-config-pickle",
"skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-pickle",
"skops-tests/iris-sklearn-1.0-hist_gradient_boosting-without-config-pickle",
"skops-tests/iris-sklearn-latest-logistic_regression-with-config-pickle",
"skops-tests/iris-sklearn-latest-logistic_regression-without-config-pickle",
"skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-pickle",
"skops-tests/iris-sklearn-latest-hist_gradient_boosting-without-config-pickle",
],
"tabular-regression": [
"skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-skops",
"skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-skops",
"skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-skops",
"skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-skops",
"skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-pickle",
"skops-tests/tabularregression-sklearn-1.0-linear_regression-without-config-pickle",
"skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-pickle",
"skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-without-config-pickle",
"skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-pickle",
"skops-tests/tabularregression-sklearn-latest-linear_regression-without-config-pickle",
"skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-pickle",
"skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-without-config-pickle",
],
"text-classification": [
"skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-skops",
"skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-skops",
"skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-skops",
"skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-skops",
"skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-without-config-pickle",
"skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-pickle",
"skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-without-config-pickle",
"skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-pickle",
"skops-tests/textclassification-sklearn-latest-logistic_regression-without-config-pickle",
"skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-pickle",
"skops-tests/textclassification-sklearn-1.0-logistic_regression-without-config-pickle",
"skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-pickle",
],
}
# This contains information about the test cases above, used in the tests to
# define which tests to run for which examples.
TEST_CASES = {
"tabular-classification": {
"skops-tests/iris-sklearn-latest-logistic_regression-without-config-pickle": {
"input": "iris-latest-input.json",
"output": "iris-logistic_regression-latest-output.json",
"has_config": False,
"old_sklearn": False,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-latest-logistic_regression-with-config-pickle": {
"input": "iris-latest-input.json",
"output": "iris-logistic_regression-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-latest-logistic_regression-with-config-skops": {
"input": "iris-latest-input.json",
"output": "iris-logistic_regression-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-1.0-logistic_regression-without-config-pickle": {
"input": "iris-1.0-input.json",
"output": "iris-logistic_regression-1.0-output.json",
"has_config": False,
"old_sklearn": True,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-1.0-logistic_regression-with-config-pickle": {
"input": "iris-1.0-input.json",
"output": "iris-logistic_regression-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-1.0-logistic_regression-with-config-skops": {
"input": "iris-1.0-input.json",
"output": "iris-logistic_regression-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-latest-hist_gradient_boosting-without-config-pickle": {
"input": "iris-latest-input.json",
"output": "iris-hist_gradient_boosting-latest-output.json",
"has_config": False,
"old_sklearn": False,
"accepts_nan": True,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-pickle": {
"input": "iris-latest-input.json",
"output": "iris-hist_gradient_boosting-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": True,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-skops": {
"input": "iris-latest-input.json",
"output": "iris-hist_gradient_boosting-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": True,
"loads_on_new_sklearn": True,
},
"skops-tests/iris-sklearn-1.0-hist_gradient_boosting-without-config-pickle": {
"input": "iris-1.0-input.json",
"output": "iris-hist_gradient_boosting-1.0-output.json",
"has_config": False,
"old_sklearn": True,
"accepts_nan": True,
"loads_on_new_sklearn": False,
},
"skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-pickle": {
"input": "iris-1.0-input.json",
"output": "iris-hist_gradient_boosting-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": True,
"loads_on_new_sklearn": False,
},
"skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-skops": {
"input": "iris-1.0-input.json",
"output": "iris-hist_gradient_boosting-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": True,
"loads_on_new_sklearn": False,
},
},
"tabular-regression": {
"skops-tests/tabularregression-sklearn-latest-linear_regression-without-config-pickle": {
"input": "tabularregression-latest-input.json",
"output": "tabularregression-linear_regression-latest-output.json",
"has_config": False,
"old_sklearn": False,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-pickle": {
"input": "tabularregression-latest-input.json",
"output": "tabularregression-linear_regression-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-skops": {
"input": "tabularregression-latest-input.json",
"output": "tabularregression-linear_regression-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-1.0-linear_regression-without-config-pickle": {
"input": "tabularregression-1.0-input.json",
"output": "tabularregression-linear_regression-1.0-output.json",
"has_config": False,
"old_sklearn": True,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-pickle": {
"input": "tabularregression-1.0-input.json",
"output": "tabularregression-linear_regression-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-skops": {
"input": "tabularregression-1.0-input.json",
"output": "tabularregression-linear_regression-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": False,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-without-config-pickle": {
"input": "tabularregression-latest-input.json",
"output": "tabularregression-hist_gradient_boosting_regressor-latest-output.json",
"has_config": False,
"old_sklearn": False,
"accepts_nan": True,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-pickle": {
"input": "tabularregression-latest-input.json",
"output": "tabularregression-hist_gradient_boosting_regressor-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": True,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-skops": {
"input": "tabularregression-latest-input.json",
"output": "tabularregression-hist_gradient_boosting_regressor-latest-output.json",
"has_config": True,
"old_sklearn": False,
"accepts_nan": True,
"loads_on_new_sklearn": True,
},
"skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-without-config-pickle": {
"input": "tabularregression-1.0-input.json",
"output": "tabularregression-hist_gradient_boosting_regressor-1.0-output.json",
"has_config": False,
"old_sklearn": True,
"accepts_nan": True,
"loads_on_new_sklearn": False,
},
"skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-pickle": {
"input": "tabularregression-1.0-input.json",
"output": "tabularregression-hist_gradient_boosting_regressor-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": True,
"loads_on_new_sklearn": False,
},
"skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-skops": {
"input": "tabularregression-1.0-input.json",
"output": "tabularregression-hist_gradient_boosting_regressor-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"accepts_nan": True,
"loads_on_new_sklearn": False,
},
},
"text-classification": {
"skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-without-config-pickle": {
"input": "textclassification-latest-input.json",
"output": "textclassification-hist_gradient_boosting-latest-output.json",
"has_config": False,
"old_sklearn": False,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-pickle": {
"input": "textclassification-latest-input.json",
"output": "textclassification-hist_gradient_boosting-latest-output.json",
"has_config": True,
"old_sklearn": False,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-skops": {
"input": "textclassification-latest-input.json",
"output": "textclassification-hist_gradient_boosting-latest-output.json",
"has_config": True,
"old_sklearn": False,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-without-config-pickle": {
"input": "textclassification-1.0-input.json",
"output": "textclassification-hist_gradient_boosting-1.0-output.json",
"has_config": False,
"old_sklearn": True,
"loads_on_new_sklearn": False,
},
"skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-pickle": {
"input": "textclassification-1.0-input.json",
"output": "textclassification-hist_gradient_boosting-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"loads_on_new_sklearn": False,
},
"skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-skops": {
"input": "textclassification-1.0-input.json",
"output": "textclassification-hist_gradient_boosting-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"loads_on_new_sklearn": False,
},
"skops-tests/textclassification-sklearn-latest-logistic_regression-without-config-pickle": {
"input": "textclassification-latest-input.json",
"output": "textclassification-logistic_regression-latest-output.json",
"has_config": False,
"old_sklearn": False,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-pickle": {
"input": "textclassification-latest-input.json",
"output": "textclassification-logistic_regression-latest-output.json",
"has_config": True,
"old_sklearn": False,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-skops": {
"input": "textclassification-latest-input.json",
"output": "textclassification-logistic_regression-latest-output.json",
"has_config": True,
"old_sklearn": False,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-1.0-logistic_regression-without-config-pickle": {
"input": "textclassification-1.0-input.json",
"output": "textclassification-logistic_regression-1.0-output.json",
"has_config": False,
"old_sklearn": True,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-pickle": {
"input": "textclassification-1.0-input.json",
"output": "textclassification-logistic_regression-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"loads_on_new_sklearn": True,
},
"skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-skops": {
"input": "textclassification-1.0-input.json",
"output": "textclassification-logistic_regression-1.0-output.json",
"has_config": True,
"old_sklearn": True,
"loads_on_new_sklearn": True,
},
},
}
ALL_TASKS = {
"automatic-speech-recognition",
"audio-source-separation",
"feature-extraction",
"image-classification",
"question-answering",
"sentence-similarity",
"tabular-classification",
"text-generation",
"text-to-speech",
"token-classification",
}
class PipelineTestCase(TestCase):
@skipIf(
os.path.dirname(os.path.dirname(__file__)).endswith("common"),
"common is a special case",
)
def test_has_at_least_one_task_enabled(self):
self.assertGreater(
len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
)
def test_unsupported_tasks(self):
unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
for unsupported_task in unsupported_tasks:
with self.subTest(msg=unsupported_task, task=unsupported_task):
with self.assertRaises(EnvironmentError):
get_pipeline(unsupported_task, model_id="XX")
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_api_tabular_regression.py | """Tests for tabular regression
The test class is almost completely copied from TabularClassificationTestCase,
only changing to different parametrized test cases.
"""
import json
import os
from pathlib import Path
from unittest import TestCase, skipIf
import pytest
from app.main import ALLOWED_TASKS
from parameterized import parameterized, parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TEST_CASES, TESTABLE_MODELS
@parameterized_class([{"test_case": x} for x in TESTABLE_MODELS["tabular-regression"]])
@skipIf(
"tabular-regression" not in ALLOWED_TASKS,
"tabular-regression not implemented",
)
class TabularRegressionTestCase(TestCase):
# self.test_case is provided by parameterized_class
def setUp(self):
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = self.test_case
os.environ["TASK"] = "tabular-regression"
self.case_data = TEST_CASES["tabular-regression"][self.test_case]
sample_folder = Path(__file__).parent / "generators" / "samples"
self.data = json.load(open(sample_folder / self.case_data["input"], "r"))
self.expected_output = json.load(
open(sample_folder / self.case_data["output"], "r")
)
from app.main import app
self.app = app
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def _can_load(self):
# to load a model, it has to either support being loaded on new sklearn
# versions, or it needs to be saved by a new sklearn version, since the
# assumption is that the current sklearn version is the latest.
return (
self.case_data["loads_on_new_sklearn"] or not self.case_data["old_sklearn"]
)
def _check_requirement(self, requirement):
# This test is not supposed to run and is thus skipped.
if not requirement:
pytest.skip("Skipping test because requirements are not met.")
def test_success_code(self):
# This test does a sanity check on the output and checks the response
# code which should be 200. This requires the model to be from the
# latest sklearn which is the one installed locally.
self._check_requirement(not self.case_data["old_sklearn"])
data = self.data
expected_output_len = len(self.expected_output)
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": data})
assert response.status_code == 200
content = json.loads(response.content)
assert isinstance(content, list)
assert len(content) == expected_output_len
def test_wrong_sklearn_version_warning(self):
# if the wrong sklearn version is used the model will be loaded and
# gives an output, but warnings are raised. This test makes sure the
# right warnings are raised and that the output is included in the
# error message.
self._check_requirement(self.case_data["old_sklearn"] and self._can_load())
data = self.data
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": data})
# check response
assert response.status_code == 400
content = json.loads(response.content)
assert "error" in content
assert "warnings" in content
# check warnings
assert any("Trying to unpickle estimator" in w for w in content["warnings"])
warnings = json.loads(content["error"])["warnings"]
assert any("Trying to unpickle estimator" in w for w in warnings)
# check error
error_message = json.loads(content["error"])
assert len(error_message["output"]) == len(self.expected_output)
for val_output, val_expected in zip(
error_message["output"], self.expected_output
):
self.assertAlmostEqual(val_output, val_expected)
def test_cannot_load_model(self):
# test the error message when the model cannot be loaded on a wrong
# sklearn version
self._check_requirement(not self.case_data["loads_on_new_sklearn"])
data = self.data
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": data})
assert response.status_code == 400
content = json.loads(response.content)
assert "error" in content
assert "An error occurred while loading the model:" in content["error"]
@parameterized.expand(
[
(["add"], ["The following columns were given but not expected:"]),
(["drop"], ["The following columns were expected but not given:"]),
(
["add", "drop"],
[
"The following columns were given but not expected:",
"The following columns were expected but not given:",
],
),
]
)
def test_extra_columns(self, column_operations, warn_messages):
# Test that the right warning is raised when there are extra columns in
# the input.
self._check_requirement(self.case_data["has_config"] and self._can_load())
data = self.data.copy()
if "drop" in column_operations:
# we remove the first column in the data. Note that `data` is a
# dict of column names to values.
data["data"].pop(next(iter(data["data"].keys())))
if "add" in column_operations:
# we add an extra column to the data, the same as the first column.
# Note that `data` is a dict of column names to values.
data["data"]["extra_column"] = next(iter(data["data"].values()))
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": data})
assert response.status_code == 400
content = json.loads(response.content)
assert "error" in content
assert "warnings" in content
for warn_message in warn_messages:
assert any(warn_message in w for w in content["warnings"])
if "drop" not in column_operations or self.case_data["accepts_nan"]:
# predict does not raise an error
error_message = json.loads(content["error"])
assert len(error_message["output"]) == len(self.expected_output)
if "drop" not in column_operations:
# if no column was dropped, the predictions should be the same
for val_output, val_expected in zip(
error_message["output"], self.expected_output
):
self.assertAlmostEqual(val_output, val_expected)
else:
# otherwise some columns will be empty and predict errors.
assert (
"does not accept missing values encoded as NaN natively"
in content["error"]
)
def test_malformed_input(self):
self._check_requirement(self._can_load())
with TestClient(self.app) as client:
response = client.post("/", data=b"Where do I live ?")
assert response.status_code == 400
content = json.loads(response.content)
assert set(content.keys()) == {"error"}
| 9 |
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/89_deep_rl_a2c/test | 0 |
|
0 | hf_public_repos/blog | hf_public_repos/blog/zh/open-llm-leaderboard-mmlu.md | ---
title: "Open LLM 排行榜近况"
thumbnail: /blog/assets/evaluating-mmlu-leaderboard/thumbnail.png
authors:
- user: clefourrier
- user: SaylorTwift
- user: slippylolo
- user: thomwolf
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# Open LLM 排行榜近况
Open LLM 排行榜是 Hugging Face 设立的一个用于评测开放大语言模型的公开榜单。最近,随着 [**Falcon 🦅**](https://huggingface.co/tiiuae/falcon-40b) 的发布并在 [Open LLM 排行榜](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) 上疯狂屠榜,围绕这个榜单在推特上掀起了一轮热烈的讨论。
讨论主要围绕排行榜上的四个评测基准其中之一: [大规模多任务语言理解](https://arxiv.org/abs/2009.03300) (Massive Multitask Language Understanding,MMLU) 基准。
社区发现头部模型 [**LLaMA 🦙**](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) 在 Open LLM 榜单上的 MMLU 得分比 [LLaMA 论文](https://arxiv.org/abs/2302.13971) 中宣称的数字要低很多,大家都感觉很奇怪。
因此,我们决定深入分析一下这个现象🕳🐇,看看到底是什么原因,又应该如何解决。
在求索的过程中,我们与 [@javier-m](https://huggingface.co/javier-m) 大神进行了讨论,他之前参与了 LLaMA 模型的评估; 我们还与来自 Falcon 团队的 [@slippylolo](https://huggingface.co/slippylolo) 进行了讨论。虽然承教这两位甚多,但文章中所有可能的错误都应该归咎于我们而不是他们!
在这段旅程中,你将学到很多有关如何评测模型的知识,因此,如果后面你在网络上或论文中看到了相关的评测数字,相信你就会有一定的判断力了。
准备好了吗?系好安全带,我们要起飞了🚀。
## Open LLM 排行榜是什么?
首先,请注意 [Open LLM 排行榜](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) 实际上只是对开源基准测试库 [EleutherAI LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) 的一个封装,该库是由 [EleutherAI 非营利性人工智能研究实验室](https://www.eleuther.ai/) 创建的。EleutherAI 实验室是一支在人工智能领域资历深厚的团队,他们有很多耳熟能详的工作,如创建 [The Pile](https://pile.eleuther.ai/) 数据集,训练 [GPT-J](https://huggingface.co/EleutherAI/gpt-j-6b) 、[GPT-Neo-X 20B](https://huggingface.co/EleutherAI/gpt-neox-20b) 以及 [Pythia](https://github.com/EleutherAI/pythia) 模型。
Open LLM 排行榜会在 Hugging Face 计算集群空闲时运行 `lm-evaluation-harness` 以对众多模型进行评测,将结果保存在 Hub 上的一个数据集中,并最终显示在 [排行榜 space](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) 上。
在 [EleutherAI lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) 上运行 LLaMA 模型所得的 MMLU 分数与 LLaMA 论文宣称的分数有很大差距。
为什么会这样?
## MMLU 的 1001 种测法
事实证明,LLaMA 团队使用的是另一个开源实现: 由最初提出并开发 MMLU 基准的加州大学伯克利分校团队实现的版本,见 [这儿](https://github.com/hendrycks/test),我们称其为 **“原始实现”**。
随着调查的进一步深入,我们还发现了另一个有趣的实现: 斯坦福大学 [基础模型研究中心 (CRFM)](https://crfm.stanford.edu/) 开发的一个全面的评估基准: [语言模型整体评估 (Holistic Evaluation of Language Models,HELM)](https://crfm.stanford.edu/helm/latest/) 中也实现了 MMLU 基准,我们将其称为 **HELM 实现**。
EleutherAI Harness 和斯坦福 HELM 的设计理念殊途同归,两者都在单个代码库中集成了多个评测基准 (包括 MMLU),以为模型提供一个全景式性能评估工具。Open LLM 排行榜也秉持相同的理念,因此我们在实现 Open LLM 排行榜时选择了封装 EleutherAI Harness 这样的“整体”基准,而不是集成多个单指标评测代码库。
为了弄清楚得分差异的问题,我们决定在同一组模型上运行 MMLU 评测的三种不同实现,并根据得分对这些模型进行排名:
- Harness 实现 ([commit e47e01b](https://github.com/EleutherAI/lm-evaluation-harness/tree/e47e01beea79cfe87421e2dac49e64d499c240b4))
- HELM 实现 ([提交 cab5d89](https://github.com/stanford-crfm/helm/tree/cab5d89fadbff86190f29ddfa497301958eaf2ec))
- 原始实现 (由 [@olmer](https://huggingface.co/olmer) 大神集成入 Hugging Face ([代码](https://github.com/hendrycks/test/pull/13)) )
(请注意,Harness 实现最近有更新,更多信息请参见文末。)
结果很神奇 (下图只是排名,后文有完整的评分):

我们发现,MMLU 的不同实现给出的得分竟然截然不同,甚至于改变了模型的排名顺序!
下面我们试着了解下这种差异从何而来。🕵️在此之前,我们先简要了解一下大家都如何对现代 LLM 进行自动化评估。
## 现如今,大家都如何对 LLM 模型进行自动化评估
MMLU 测试由一组多项选择题组成,因此相对于开放式问答题这样的题型而言,该基准算是比较简单了。但正如大家后面会看到的,即便这么简单,这里面依然存在一些空间使得实现细节上的差异足以影响评测结果。MMLU 基准涵盖“人文”、“社会科学”、“STEM” 等 57 个通用知识领域,里面的每个问题包含四个可能选项,且每个问题只有一个正确答案。
下面给出一个例子:
```
Question: Glucose is transported into the muscle cell:
Choices:
A. via protein transporters called GLUT4.
B. only in the presence of insulin.
C. via hexokinase.
D. via monocarbylic acid transporters.
Correct answer: A
```
注意: 你可以使用 Hub 的 [数据集查看器](https://huggingface.co/datasets/cais/mmlu/viewer/college_medicine/dev?row=0) 来轻松探索该数据集的更多细节。
大语言模型在所有 AI 模型中其实算比较简单的模型。其输入为“文本字符串”(或称为“提示”),该输入会被切分成词元 (词元可以是单词、子词或字符,具体取决于模型的要求) 并馈送至模型。根据这个输入,模型预测词汇表中每一个词元是下一输出词元的概率,至此,你就得到了词汇表中每一个词适合作为输入提示的下一个词的可能性。
然后,我们可以采取一些策略来从这个概率分布中选择一个词元作为输出词元,例如可以选择概率最大的词元 (或者我们还可以通过采样引入些微噪声,以避免出现“过于机械”的答案)。接着,我们把选择的词元添加到提示中并将其馈送给模型以继续生成下一个词元,依此类推,直至句子结束:

上图展示了 ChatGPT 或 Hugging Chat 生成答案的过程。
总结一下,从模型中获取信息以对其进行评测,主要有两种方法:
1. 获取某一组特定词元的输出 **概率**,并 **将其与样本中的备选项进行比较**;
2. 获取模型 **生成的文本** (用上文所述的方法一个个迭代生成),并 **将这些文本与样本中的备选项进行比较**。
有了这些知识,我们就可以开始深入研究 MMLU 的三种实现,以了解馈送到模型的输入是什么、预期的输出是什么以及如何比较这些输出。
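下面给出一段极简的示意代码 (并非任何评测框架的官方实现,模型与提示均为演示假设),用来说明上述两种从模型获取信息的方式:
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "gpt2"  # 仅为演示假设,实际评测时应替换为被评测的 LLM
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "Question: Glucose is transported into the muscle cell:\nA. ...\nB. ...\nC. ...\nD. ...\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt")

with torch.no_grad():
    next_token_logits = model(**inputs).logits[0, -1]  # 下一个词元在整个词表上的 logits

# 方式 1: 只比较四个选项字母对应词元的概率
option_ids = [tokenizer(f" {letter}", add_special_tokens=False).input_ids[-1] for letter in "ABCD"]
option_logprobs = torch.log_softmax(next_token_logits, dim=-1)[option_ids]
pred_by_prob = "ABCD"[option_logprobs.argmax().item()]

# 方式 2: 让模型自由生成文本,再把生成的文本与备选项比较
gen = model.generate(**inputs, max_new_tokens=5, do_sample=False, pad_token_id=tokenizer.eos_token_id)
pred_by_text = tokenizer.decode(gen[0, inputs.input_ids.shape[1]:], skip_special_tokens=True).strip()

print(pred_by_prob, pred_by_text)
```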
## 各式各样的 MMLU: 提示
我们先比较一下对同一个 MMLU 数据集样本,这三种实现都是如何构建模型输入的:
<div>
<table><p>
<tbody>
<tr style="text-align: left;">
<td>原始实现 <a href="https://github.com/hendrycks/test/pull/13">Ollmer PR</a></td>
<td>HELM 实现<a href="https://github.com/stanford-crfm/helm/tree/cab5d89fadbff86190f29ddfa497301958eaf2ec">commit cab5d89</a> </td>
<td>Harness 实现<a href="https://github.com/EleutherAI/lm-evaluation-harness/tree/e47e01beea79cfe87421e2dac49e64d499c240b4">commit e47e01b</a></td>
</tr>
<tr style=" vertical-align: top;">
<td>The following are multiple choice questions (with answers) about us foreign policy. <br>
How did the 2008 financial crisis affect America's international reputation? <br>
A. It damaged support for the US model of political economy and capitalism <br>
B. It created anger at the United States for exaggerating the crisis <br>
C. It increased support for American global leadership under President Obama <br>
D. It reduced global use of the US dollar <br>
Answer:
</td>
<td>The following are multiple choice questions (with answers) about us foreign policy. <br>
<br>
Question: How did the 2008 financial crisis affect America's international reputation? <br>
A. It damaged support for the US model of political economy and capitalism <br>
B. It created anger at the United States for exaggerating the crisis <br>
C. It increased support for American global leadership under President Obama <br>
D. It reduced global use of the US dollar <br>
Answer:
</td>
<td>Question: How did the 2008 financial crisis affect America's international reputation? <br>
Choices: <br>
A. It damaged support for the US model of political economy and capitalism <br>
B. It created anger at the United States for exaggerating the crisis <br>
C. It increased support for American global leadership under President Obama <br>
D. It reduced global use of the US dollar <br>
Answer:
</td>
</tr>
</tbody>
</table><p>
</div>
可以看到,三者之间差异虽小,但仍不可忽视:
- 首句 (或指令): 差异不大。HELM 实现额外多加了一个空格,但注意 Harness 实现是没有指令句的;
- 问题: HELM 实现和 Harness 实现都加了 `Question:` 前缀;
- 选项: Harness 实现在选项之前加了 `Choices:` 前缀。
## 我们如何用上面构建的提示评估模型?
我们首先看看 [原始实现](https://github.com/hendrycks/test/pull/13) 是如何做的: 其仅比较模型对四个选项字母的预测概率。

这种方法其实是有点放水的,举个例子:

在上图这种情况下,因为在模型给 D 选项的概率在 4 个选项中是最高的,因此原始实现认为模型预测正确,给加了 1 分。但如果我们抬起头来看一下完整的概率输出,单词 “Zygote” 的概率其实是最高的,而它并不属于 4 个选项之一 (这仅是一个示例,而不是一个真实的例子)。
那么,如何确保模型尽可能少犯这类错误呢?
我们可以使用“ **少样本** ”方法,在提示中为模型提供一个或多个范例 (包括示例问题及其答案),如下:

上图,我们在提示中加了一个范例,用于告诉模型我们的预期,这样在预测时模型就不太可能给出超出选项范围的答案。
由于这种方法能够提高性能,因此在全部 3 种实现中,我们均选择了以 5 样本方式进行 MMLU 评估 (即每个提示中都含有 5 个范例)。(注意: 在每个基准测试中,虽然我们用了同样 5 个范例,但它们的排列顺序可能有所不同,这也有可能导致性能差异,但我们在此不深入。另外,我们还必须注意避免范例中的某些答案泄漏到预测样本中 ……)
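作为示意 (提示格式仅为演示假设,并非某个评测框架的原样实现),少样本提示的拼接逻辑大致如下:
```python
def build_few_shot_prompt(examples, question, choices):
    """把若干带正确答案的范例与待测题目拼接成一个少样本提示。"""
    parts = ["The following are multiple choice questions (with answers).\n"]
    for ex in examples:  # ex 形如 {"question": ..., "choices": [...], "answer": "A"}
        parts.append(ex["question"])
        parts.extend(f"{letter}. {text}" for letter, text in zip("ABCD", ex["choices"]))
        parts.append(f"Answer: {ex['answer']}\n")
    parts.append(question)
    parts.extend(f"{letter}. {text}" for letter, text in zip("ABCD", choices))
    parts.append("Answer:")
    return "\n".join(parts)
```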
**HELM 实现:** 我们再看看 [HELM 实现](https://github.com/stanford-crfm/helm/tree/cab5d89fadbff86190f29ddfa497301958eaf2ec)。其少样本提示的实现与原始实现类似,但其模型评估方式与我们刚刚看到的原始实现有很大不同: 其根据模型预测的下一个输出词元的概率来选择输出文本,并将生成的文本与正确答案的文本进行对比,如下所示:

此时,如果输出词元中 “Zygote” 概率最高 (如上图),则模型会输出 “Zygote”,然后 HELM 实现就将其判为错误,模型就无法得分:

**Harness 实现:** 最后我们看下 [截至 2023 年 1 月 的 Harness 实现](https://github.com/EleutherAI/lm-evaluation-harness/tree/e47e01beea79cfe87421e2dac49e64d499c240b4),Open LLM 排行榜使用了该实现。它对同一个数据集的得分计算方法又不一样 (请注意,该实现最近有重大更新,文末有详细信息)。
这里,我们仍然使用概率,但这次用的是完整答案序列的概率,这个完整序列包括选项字母及其后面的答案文本,例如 “C. The second pharyngeal arch”。为了计算整序列的概率,我们获取每个词元的概率 (与上面其他实现一样) 并求它们的联合概率。为了数值稳定性,在计算联合概率时我们主要计算对数概率并对其进行求和,最后对其进行归一化 (可选,也可以不做),归一化主要是将对数联合概率除以词元数,以避免长答案有不公平的得分优势 (稍后会详细介绍)。工作原理如下图所示:

下表总结了每种实现对模型的输出形式的要求:
<div>
<table><p>
<tbody>
<tr style="text-align: left;">
<td>原始实现</td>
<td>HELM 实现</td>
<td>Harness 实现(截至 2023 年 1 月)</td>
</tr>
<tr style=" vertical-align: top;">
<td> 比较选项字母的预测概率:
</td>
<td> 期望模型输出正确选项的字母:
</td>
<td> 比较所有答案文本的概率:
</td>
</tr>
<tr style=" vertical-align: top;">
<td> A <br>
B <br>
C <br>
D
</td>
<td>A
</td>
<td> A. It damaged support for the US model of political economy and capitalism <br>
B. It created anger at the United States for exaggerating the crisis <br>
C. It increased support for American global leadership under President Obama <br>
D. It reduced global use of the US dollar
</td>
</tr>
</tbody>
</table><p>
</div>
搞清楚这些之后,我们比较一下多个模型在这三种实现上的得分:
| | MMLU (HELM 实现) | MMLU (Harness 实现) | MMLU (原始实现) |
|:------------------------------------------|------------:|---------------:|----------------:|
| huggingface/llama-65b | **0.637** | 0.488 | **0.636** |
| tiiuae/falcon-40b | 0.571 | **0.527** | 0.558 |
| huggingface/llama-30b | 0.583 | 0.457 | 0.584 |
| EleutherAI/gpt-neox-20b | 0.256 | 0.333 | 0.262 |
| huggingface/llama-13b | 0.471 | 0.377 | 0.47 |
| huggingface/llama-7b | 0.339 | 0.342 | 0.351 |
| tiiuae/falcon-7b | 0.278 | 0.35 | 0.254 |
| togethercomputer/RedPajama-INCITE-7B-Base | 0.275 | 0.34 | 0.269 |
可以看到,即便对于相同的 MMLU 数据集,模型的绝对分数和相对排名 (参见第一张图) 对评测基准的实现方式仍非常敏感。
假设你已经完美复刻了一个 LLaMA 65B 模型,并使用 Harness 对其进行了评估 (得分 0.488,见上表)。现在,你想要将其与其他人发表的公开结果进行比较 (假设他是在原始 MMLU 实现上进行评估的,得分为 0.637),分数竟相差 30% 之巨。你可能会想: “天哪,我的训练完全毁了😱”。但事实并非如此,这些都只是毫无可比性的数字,即使它们都叫 “MMLU 分数”,且都是在同一个 MMLU 数据集上进行评测的。
那么,是否存在一个评估 LLM 模型性能的“最佳方法”呢?这个问题不好回答。正如我们在上文看到的,使用不同的评测方式对不同的模型进行评估时,其排名会变得混乱。为了尽可能保持公平,人们可能会倾向于选择那个平均打分最高的评测方法,因为看上去好像它更能“解锁”模型的实力。在本文中,这意味着我们应该使用原始实现。但正如我们在上面看到的,使用仅对四个选项的概率进行排序的方式有可能以某种方式给模型放水,而且它更偏心那些性能较弱的模型。此外,从开源模型中获取词元预测概率 (或似然) 可能很容易,但闭源 API 模型可能并不会提供这样的 API。
亲爱的读者,我们说了这么多,你有何高见?不妨到 Open LLM 排行榜的这个帖子中说上两句: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/82。
## 总结
整个过程走下来,我们学到了一个重要的教训: 评测得分与实现紧密相关 —— 具体到提示、分词等微小细节的差异都有可能导致最终得分的差异。仅靠 “MMLU 得分” 这几个字不足以带来什么信息量,因为它们所使用的评测代码实现可能不同,所以根本没有可比性。
这就是为什么我们需要开放、标准化、可重复的基准测试。诸如 [EleutherAI Eval Harness](https://github.com/EleutherAI/lm-evaluation-harness/) 或 [Stanford HELM](https://github.com/stanford-crfm/helm/) 这样的开放基准对社区来说是无价的,如果没有它们,我们就无法比较模型和论文之间的结果,更谈不上改进 LLM 了。
**后记**: 就 Open LLM 排行榜而言,我们决定坚持使用社区维护的评估库。值得庆幸的是,本文撰写期间,优秀的 EleutherAI Harness 的社区,尤其是 [ollmer](https://github.com/EleutherAI/lm-evaluation-harness/issues/475)
,完成了一项壮举: 更新了 Harness 实现中的 MMLU 的评测代码,使其不管是实现还是得分都与原始实现更相似。
现在,我们正在用新版的 [EleutherAI Eval Harness](https://github.com/EleutherAI/lm-evaluation-harness/) 重刷排行榜,在未来的几周内你将看到基于 Eleuther Harness v2 的跑分,敬请期待!(重新运行所有模型需要一些时间,请耐心等待,:抱抱:)
## 致谢
非常感谢 LLaMA 团队的 Xavier Martinet、Aurélien Rodriguez 和 Sharan Narang 对本文内容的宝贵建议,并拨冗回答了我们所有的问题。
## 可用于复现的软件及其版本
以下是本文使用的各代码库的 commit 版本。
- EleutherAI LM harness 实现 commit e47e01b: https://github.com/EleutherAI/lm-evaluation-harness/tree/e47e01beea79cfe87421e2dac49e64d499c240b4
- HELM 实现 commit cab5d89: https://github.com/stanford-crfm/helm/tree/cab5d89fadbff86190f29ddfa497301958eaf2ec
- 原始 MMLU 实现 (由 [@olmer](https://huggingface.co/olmer) 大神集成至 Hugging Face): https://github.com/hendrycks/test/pull/13 | 1 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/lora.md | ---
title: 使用 LoRA 进行 Stable Diffusion 的高效参数微调
thumbnail: /blog/assets/lora/thumbnail.png
authors:
- user: pcuenq
- user: sayakpaul
translators:
- user: innovation64
- user: zhongdongy
proofreader: true
---
# 使用 LoRA 进行 Stable Diffusion 的高效参数微调
[LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) 是微软研究员引入的一项新技术,主要用于处理大模型微调的问题。目前超过数十亿以上参数的具有强能力的大模型 (例如 GPT-3) 通常在为了适应其下游任务的微调中会呈现出巨大开销。LoRA 建议冻结预训练模型的权重并在每个 Transformer 块中注入可训练层 (*秩-分解矩阵*)。因为不需要为大多数模型权重计算梯度,所以大大减少了需要训练参数的数量并且降低了 GPU 的内存要求。研究人员发现,通过聚焦大模型的 Transformer 注意力块,使用 LoRA 进行的微调质量与全模型微调相当,同时速度更快且需要更少的计算。
## 用于 Diffusers 的 LoRA 🧨
尽管 LoRA 最初是为大模型提出的,并在 transformer 块上进行了演示,但该技术也可以应用于其他地方。在微调 Stable Diffusion 的情况下,LoRA 可以应用于将图像表示与描述它们的提示相关联的交叉注意层。下图的细节 (摘自 [Stable Diffusion 论文](https://arxiv.org/abs/2112.10752)) 并不重要,只需要注意黄色块是负责建立图文之间的关系表示就行。

据我们所知,Simo Ryu ([`@cloneofsimo`](https://github.com/cloneofsimo)) 是第一个提出适用于 Stable Diffusion 的 LoRA 实现的人。如果想查看相关示例和许多其他有趣的讨论和见解。请一定要看看他们的 [GitHub 项目](https://github.com/cloneofsimo/lora)。
为了将 LoRA 的可训练矩阵注入到与交叉注意力层一样深的模型中,过去人们需要以富有想象力 (但脆弱) 的方式破解 [diffusers](https://github.com/huggingface/diffusers) 的源代码。如果 Stable Diffusion 向我们展示了一件事,那就是社区总是会想出办法来改变和调整模型以达到创造性目的,我们喜欢这样!由于许多其他原因,提供操纵交叉注意力层的灵活性可能是有益的,例如更容易采用 [xFormers](https://github.com/facebookresearch/xformers) 等优化技术。[Prompt-to-Prompt](https://arxiv.org/abs/2208.01626) 等其他创意项目可以使用一些简单的方法来访问这些层,因此我们决定 [为用户提供一种通用的方法来做到这一点](https://github.com/huggingface/diffusers/pull/1639)。自 12 月下旬以来,我们一直在测试,并在我们的 diffusers 中 [正式发布](https://github.com/huggingface/diffusers/releases/tag/v0.12.0)。
我们一直在与 [`@cloneofsimo`](https://github.com/cloneofsimo) 合作,为 Dreambooth 和全微调方法提供 Diffusions 中的 LoRA 训练支持!这些技术提供了以下好处:
- 更快的训练速度
- 计算要求较低。我们可以在具有 11 GB VRAM 的 2080 Ti 中创建一个全微调模型!
- **小了很多的训练模型**。由于原始模型已冻结,我们注入了新层进行训练,因此我们可以将新层的权重保存为大小约为 3 MB 的单个文件。这比 UNet 模型的原始大小 *小一千倍*。
我们对最后一点特别兴奋。为了让用户分享他们出色的微调或 dreamboothed 模型,他们必须分享最终模型的完整副本。其他想要试用它们的用户必须在他们最喜欢的 UI 中下载经过微调的权重,这会增加大量存储和下载成本。截至今天,[大约有 1000 个 Dreambooth 模型在 Dreambooth 概念库中注册](https://huggingface.co/sd-dreambooth-library),可能还有更多未在库中注册。
使用 LoRA,现在可以发布 [单个 3.29 MB 文件](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4/blob/main/pytorch_lora_weights.bin) 以允许其他人使用你的微调模型。
*(感谢 [`@mishig25`](https://github.com/mishig25),他是我了解到的首个在平常对话中将 **dreamboothing** 作为动词的人)。*
## LoRA 微调
Stable Diffusion 的全模型微调过去既缓慢又困难,这也是 Dreambooth 或 Textual Inversion 等轻量级方法变得如此流行的部分原因。使用 LoRA,在自定义数据集上微调模型要容易得多。
Diffusers 现在提供了一个 [LoRA 微调脚本](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py),可以在低至 11 GB 的 GPU RAM 中运行而无需借助到诸如 8-bit 优化器之类的技巧。这里展示了您如何借助它来使用 [Lambda Labs Pokémon 数据集](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) 微调模型:
```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="/sddata/finetune/lora/pokemon"
export HUB_MODEL_ID="pokemon-lora"
export DATASET_NAME="lambdalabs/pokemon-blip-captions"
accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_NAME \
--dataloader_num_workers=8 \
--resolution=512 --center_crop --random_flip \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--max_train_steps=15000 \
--learning_rate=1e-04 \
--max_grad_norm=1 \
--lr_scheduler="cosine" --lr_warmup_steps=0 \
--output_dir=${OUTPUT_DIR} \
--push_to_hub \
--hub_model_id=${HUB_MODEL_ID} \
--report_to=wandb \
--checkpointing_steps=500 \
--validation_prompt="Totoro" \
--seed=1337
```
这里需要注意的一件事是学习率为“1e-4”,远大于常规微调的通常学习率 (通常为“~1e-6”的数量级)。这是上次运行的 [W&B dashboard](https://wandb.ai/pcuenq/text2image-fine-tune/runs/b4k1w0tn?workspace=user-pcuenq),该次训练是在一块 2080 Ti GPU (11 GB 内存) 上完成的。我没有尝试优化超参数,欢迎大家自行尝试!Sayak 随后在 T4 (16 GB 内存) 上又跑了一次,请 [在这里](https://huggingface.co/spaces/pcuenq/lora-pokemon) 查看他的最终模型,以及使用该模型的演示空间。

有关 diffusers 中 LoRA 支持的更多详细信息,请参阅 [我们的文档](https://huggingface.co/docs/diffusers/main/en/training/lora) ——它将始终与实现保持同步。
## 推理
正如我们所讨论的,LoRA 的主要优势之一是您可以通过训练比原始模型大小少几个数量级的权重来获得出色的结果。我们设计了一个推理过程,允许在未修改的 Stable Diffusion 模型权重之上加载额外的权重。让我们看看它是如何工作的。
首先,我们将使用 Hub API 自动确定用于微调 LoRA 模型的基本模型是什么。从 [Sayak 的模型](https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4) 开始,我们可以使用这段代码:
```Python
from huggingface_hub import model_info
# LoRA weights ~3 MB
model_path = "sayakpaul/sd-model-finetuned-lora-t4"
info = model_info(model_path)
model_base = info.cardData["base_model"]
print(model_base) # CompVis/stable-diffusion-v1-4
```
此代码段将打印他用于微调的模型,即 `CompVis/stable-diffusion-v1-4`。就我而言,我从 Stable Diffusion 1.5 版开始训练我的模型,因此如果您使用 [我的 LoRA 模型](https://huggingface.co/pcuenq/pokemon-lora) 运行相同的代码,您会看到输出是 runwayml/stable-diffusion-v1-5。
如果您使用 `--push_to_hub` 选项,我们在上一节中看到的微调脚本会自动填充有关基本模型的信息。正如您在 [pokemon-lora 的介绍文档](https://huggingface.co/pcuenq/pokemon-lora/blob/main/README.md) 中所见,这被记录为模型存储库的 `README` 文件中的元数据标签。
在我们确定了用于使用 LoRA 进行微调的基础模型之后,我们加载了一个正常的稳定扩散管道。我们将使用 `DPMSolverMultistepScheduler` 对其进行自定义,以实现非常快速的推理:
```Python
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
pipe = StableDiffusionPipeline.from_pretrained(model_base, torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```
**神奇的地方来了**。我们从 hub 加载 LoRA 权重 *在常规模型权重之上*,将 pipline 移动到 cuda 设备并运行推理:
```Python
pipe.unet.load_attn_procs(model_path)
pipe.to("cuda")
image = pipe("Green pokemon with menacing face", num_inference_steps=25).images[0]
image.save("green_pokemon.png")
```
## 用 LoRA 进行 Dreamboothing
Dreambooth 允许您向 Stable Diffusion 模型“教授”新概念。LoRA 与 Dreambooth 兼容,过程类似于微调,有几个优点:
- 训练更快。
- 我们只需要几张我们想要训练的主题的图像 (通常 5 或 10 张就足够了)。
- 如果需要,我们可以调整文本编码器,以提高对训练主体的保真度。
要使用 LoRA 训练 Dreambooth,您需要使用 [这个 diffusers 脚本](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py)。请看一下相关的项目 [README](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#training-with-low-rank-adaptation-of-large-language-models-lora)、[文档](https://huggingface.co/docs/diffusers/main/en/training/lora) 和我们的 [超参数探索博文](https://huggingface.co/blog/dreambooth) 了解详细信息.
如果您想体验一种快速、低成本又容易的方式来用 LoRA 训练您的 Dreambooth 模型,请查看 [`hysts`](https://twitter.com/hysts12321) 创作的 这个 [Hugging Face Space](https://huggingface.co/spaces/lora-library/LoRA-DreamBooth-Training-UI)。您需要克隆它,然后为它分配一个 GPU,这样才能运行地足够快。这个过程可以省下您从头开始配置训练环境的功夫,您可以在数分钟内开始训练模型!
## 其他方法
对轻松微调的追求并不新鲜。除了 Dreambooth 之外,[_textual inversion_](https://huggingface.co/docs/diffusers/main/en/training/text_inversion) 是另一种流行的方法,它试图向训练有素的稳定扩散模型教授新概念。使用 Textual Inversion 的主要原因之一是经过训练的权重也很小且易于共享。然而,它们只适用于单个主题 (或一小部分主题),而 LoRA 可用于通用微调,这意味着它可以适应新的领域或数据集。
[Pivotal Tuning](https://arxiv.org/abs/2106.05744) 是一种尝试将 Textual Inversion 与 LoRA 相结合的方法。首先,您使用 textual inversion 技术向模型教授一个新概念,获得一个新的标记嵌入来表示它。然后,您使用 LoRA 训练该 token 嵌入以获得两全其美。
我们还没有使用 LoRA 探索过 Pivotal Tuning。谁想来挑战?🤗
| 2 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/winning-aimo-progress-prize.md | ---
title: "NuminaMath 是如何荣膺首届 AIMO 进步奖的?"
thumbnail: /blog/assets/winning-aimo-progress-prize/thumbnail.png
authors:
- user: yfleureau
guest: true
org: AI-MO
- user: liyongsea
guest: true
org: AI-MO
- user: edbeeching
- user: lewtun
- user: benlipkin
guest: true
org: AI-MO
- user: romansoletskyi
guest: true
org: AI-MO
- user: vwxyzjn
- user: kashif
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# NuminaMath 是如何荣膺首届 AIMO 进步奖的?
今年,[**Numina**](https://projectnumina.ai) 和 Hugging Face 合作角逐 [**AI 数学奥林匹克 (AI Math Olympiad,AIMO)**](https://aimoprize.com) 的首届进步奖。此次比赛旨在对开放 LLM 进行微调,以使其能解决高中难度的国际数学奥林匹克训练题。我们很高兴向大家报告: 我们的模型 - [**NuminaMath 7B TIR**](https://huggingface.co/AI-MO/NuminaMath-7B-TIR) - 在比赛中脱颖而出,成功解决了私有测试集 50 道题中的 29 道🥳!

本文介绍了 Numina 计划以及我们获胜方案背后的技术细节。如果你想先直接用你的数学难题测一下模型,可跳到这个 [**演示**](https://huggingface.co/spaces/AI-MO/math-olympiad-solver) 玩一玩。
我们开始吧!
- [NuminaMath 是如何荣膺首届 AIMO 进步奖的?](#numinamath-是如何荣膺首届-aimo-进步奖的)
- [Numina 简介 - 开放的 AI For Math 计划](#numina-简介---开放的-ai-for-math-计划)
- [AIMO 奖](#aimo-奖)
- [我们的首个进步奖获奖解决方案](#我们的首个进步奖获奖解决方案)
- [训练攻略](#训练攻略)
- [所需惟数据](#所需惟数据)
- [思维链](#思维链)
- [工具整合推理](#工具整合推理)
- [通过自一致性工具整合推理 (SC-TIR) 来抑制高波动](#通过自一致性工具整合推理-sc-tir-来抑制高波动)
- [避免过拟合诅咒](#避免过拟合诅咒)
- [我们尝试过的其他点子](#我们尝试过的其他点子)
- [Numina 的未来 - 寻求贡献者和合作伙伴!](#numina-的未来---寻求贡献者和合作伙伴)
- [致谢](#致谢)
## Numina 简介 - 开放的 AI For Math 计划
数学总有点与众不同在身上!
人人在日常中都会接触数学,孩子们甚至还未识字就先接触了数学。有史以来最伟大的数学家之一 [**拉马努金**](https://en.wikipedia.org/wiki/Srinivasa_Ramanujan) 于 1887 年出生在印度的一个普通家庭,靠自学成为一代大师。每个人与数学都有或大或小的交集,从用它消遣到靠它吃饭各有不同。
无法否认的是,数学对人类至关重要,商业社会的一切事物,从 iPhone 到核电站等等,都根植于数学之上。但,就算是纯面向应用的数学问题,也自有其趣味之处。
纯粹数学超越了智力,就如无边无际的海洋,唯有心灵才可徜徉其中。
这就是为什么当我们启动 [**Numina**](http://projectnumina.ai) 时,开源和开放数据集成了自然之选。相对于人类智能,我们认为人工智能对数学的进步也理应起到应有的广泛作用。如果计算机是思维的自行车,那么人工智能就是它的引擎 —— 它为我们这个时代的拉马努金打开新的视野。
肇始,在 Mistral AI 的支持下,一群对人工智能和数学充满热情的人于 2023 年底集体创立 ( [**Jia Li**](https://x.com/JiaLi52524397)、[**Yann Fleureau**](https://www.linkedin.com/in/yann-flureau-b1179983/)、[**Guillaume Lample**](https://x.com/GuillaumeLample)、[**Stan Polu**](https://x.com/spolu) 以及 [**Hélène Evain**](https://www.linkedin.com/in/h%C3%A9l%C3%A8ne-evain-473815b1)) 了 Numina,其灵感来自于由 Alex Gerko 和 XTX Markets 发起的人工智能数学奥林匹克 (AI Math Olympiad,AIMO) 竞赛。
2024 年初,Numina 团队获得了两位来自 Hugging Face 的 LLM 微调专家的支持 (👋 [**Lewis Tunstall**](https://x.com/_lewtun) 和 [**Ed Beeching**](https://x.com/edwardbeeching)) 从而开始竞逐 [**2024 AIMO 进步奖**](https://www.kaggle.com/competitions/ai-mathematical-olympiad-prize)。随后,我们又获得了 [**General Catalyst**](https://www.generalcatalyst.com/) 和 [**Answer.ai**](http://answer.ai/) 的支持。到 2024 年 3 月,Numina 已聚集了一支 [**来自世界各地的顶尖人才**](http://projectnumina.ai/about-us) 团队。
团队就位后,是时候对 AIMO 发起挑战了!
## AIMO 奖
每年,来自世界各地的高中生都会参加 [**国际数学奥林匹克竞赛**](https://www.imo-official.org) - 一项包含六道富有挑战性的题目,横跨代数、几何、数论等领域的竞赛。为了让大家了解竞赛的难度,下面给出了 [**去年的一道题**](https://www.imo-official.org/problems.aspx):

2023 年 11 月,[**AIMO 奖**](https://aimoprize.com) 启动,旨在推动擅长数学推理的人工智能模型的开放式开发。谁能训出能够赢得 IMO 金牌的 AI 模型,谁就会获得 500 万美元的大奖。除了大奖之外,AIMO 还推出了一系列 **进步奖**,以奖励在实现这一最终目标过程中的里程碑性工作。首个进步奖是以 [**Kaggle 竞赛**](https://www.kaggle.com/competitions/ai-mathematical-olympiad-prize) 的形式举行的,其题目比 IMO 中的题目 _简单一些_ ,相当于 IMO 预选赛的水平。下面,我们给出了一个例题,可以看到,它比上面的 IMO 题目容易一些,但对 LLM 来说仍然很棘手:
> 令 $k, l > 0$ 为参数,抛物线 $y = kx^2 - 2kx + l$ 与直线 $y = 4$ 相交于两点 $A$ 和 $B$,且两点距离为 6。问 $A$ 到原点的距离 和 $B$ 到原点的距离的平方和是多少?
赛题分为两组,每组 50 题,分别作为公开排行榜和私有排行榜,私有排行榜的题目是对参赛者不可见的。这些题目的难度与 [**AMC12**](https://artofproblemsolving.com/wiki/index.php/AMC_12) 和 [**AIME**](https://en.wikipedia.org/wiki/American_Invitational_Mathematics_Examination) 考试相当,其答案均为整数。比赛用私有排行榜决定最终排名。参赛者每天可以提交两次,仅可使用 2 月 23 日之前发布的开放模型。每次提交都会分配一个 P100 GPU 或 2xT4 GPU,最多给 9 个小时来解决 50 道题。
考虑到上述规则和限制,策略选择对于我们开发制胜方案至关重要。
## 我们的首个进步奖获奖解决方案
经过整个比赛的多轮迭代,我们的首个进步奖解决方案主要由三个部分组成:
- 微调 [**DeepSeekMath-Base 7B**](https://huggingface.co/deepseek-ai/deepseek-math-7b-base) 的攻略。通过该攻略,我们将模型打造成可以解决数学题的“推理代理”,其可以通过把自然语言推理和使用 Python REPL 计算中间结果相结合以最终解决问题。
- 一种带代码执行反馈的、为工具整合推理 (tool-integrated reasonin,TIR) 设计的新解码算法,以在推理过程中生成候选解答。
- 用来指导模型选择并避免过拟合公开排行榜的各种内部验证集。
我们使用了多个开源库来训练我们的模型,主要有 [**TRL**](https://github.com/huggingface/trl)、[**PyTorch**](https://github.com/pytorch/pytorch)、[**vLLM**](https://github.com/vllm-project/vllm) 以及 [**DeepSpeed**](https://github.com/microsoft/DeepSpeed)。在一个 8xH100 GPU 节点上,我们花了 10 个小时训成了模型。
## 训练攻略
我们采用的微调方法主要基于 [**MuMath-Code 论文**](https://arxiv.org/abs/2405.07551),其模型训练过程分为两个阶段:

_MuMath-Code 论文中的两阶段训练方法_
- **第 1 阶段:** 在自然语言“数学题 + 解答”的大规模、多样化数据集上微调基础模型,其中每个解答都需套用思维链 (CoT) 模板以促使 LLM 进行推理。
- **第 2 阶段:** 在工具整合推理的合成数据集上微调第 1 阶段得到的模型,其中每个数学题都分解为一系列推理、Python 程序及其输出。此时,我们遵循微软的 [**ToRA 论文**](https://arxiv.org/abs/2309.17452) 的做法,提示 GPT-4 以 ToRA 格式生成带有代码执行反馈的解答。对这些数据进行微调会产生一个推理代理,它可以通过将自然语言推理和使用 Python REPL 来计算中间结果结合起来以解决数学问题 (请参见下图)。

_来自 ToRA 论文的图,该论文介绍了我们用来训练模型的工具集成推理格式。_
这两个阶段,我们都用了“全模型微调”,所有模型权重在反向传播期间都得到了更新。换句话说,我们没有使用像 LoRA 或 DoRA 这样的参数高效技术,因为没有大量实验表明它们能够媲美全模型微调的性能。我们使用 TRL 的 `SFTTrainer` 中的“打包 (packing)”功能将多个样本串接到一个 2048 个词元的块中。所有模型都使能了梯度 checkpointing 训练,并使用 DeepSpeed ZeRO-3 进行分片,以确保权重、梯度和优化器状态能够放进 VRAM。两个阶段使用的主要超参如下:
| | 1 阶段 | 2 阶段 |
| --- | --- | --- |
| 学习率 | 2.0 E-5 | 2.0 E-5 |
| 总 batch size | 32 | 32 |
| 块大小 | 2048 | 1024 |
| epoch 数 | 3 | 4 |
| 学习率调度器 | cosine | cosine |
| 预热率 | 0.1 | 0.1 |
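作为参考,下面给出一个与上表第 1 阶段超参大致对应的 TRL 训练示意 (假设使用较新版本的 TRL;数据文件名、单卡 batch 切分方式等均为演示假设,并非我们实际训练脚本的原样代码;DeepSpeed ZeRO-3 分片则通过 `accelerate launch` 的配置文件开启):
```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# 数据文件名仅为占位假设,假设其中每条样本含一个 "text" 字段
dataset = load_dataset("json", data_files="stage1_cot.jsonl", split="train")

args = SFTConfig(
    output_dir="numina-math-stage1",
    learning_rate=2e-5,
    per_device_train_batch_size=4,  # 假设 8 卡,总 batch size 为 32
    num_train_epochs=3,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    max_seq_length=2048,
    packing=True,                   # 将多个样本打包进 2048 词元的块
    gradient_checkpointing=True,
    bf16=True,
)

trainer = SFTTrainer(
    model="deepseek-ai/deepseek-math-7b-base",
    args=args,
    train_dataset=dataset,
)
trainer.train()
```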
首次提交时,我们使用了 `DeepSeek 7B` 模型,我们仅对它进行了第 1 阶段微调,但我们发现性能相当有限,其在公开排行榜上的最佳 maj@32 成绩仅为 8/50。[**Abdur Rafae**](https://www.kaggle.com/abdurrafae) 的 [**公开笔记本**](https://www.kaggle.com/code/abdurrafae/improved-code-interpretation) 促使我们考虑在训练方案中加入代码执行。最初,我们专注于 [**MMOS (Mix of Minimal Optimal Sets)**](https://github.com/cyzhh/MMOS) 数据集。我们发现使用 MMOS 虽然提高了性能,但在公开排行榜上的 maj@32 最高分仍只有 16/50,我们当时望文生义地猜测其原因是 MMOS 仅包含单轮解 (即该模型仅生成单个 Python 程序,这不足以解决难题)。后来,我们意识到 MMOS 是一个误称,该 Kaggle 笔记本实际上使用的是 [**DeepSeekMath 7B RL**](https://huggingface.co/deepseek-ai/deepseek-math-7b-rl) 模型,也就是说它能够进行多步推理及代码执行。
经此一役,我们想集中精力生成一个与 DeepSeekMath Instruct/RL 模型使用的数据集类似的数据集,这一做法与 MuMath-Code 攻略结合后,带来了显著的改进。
下面,一起来看看我们是如何构建这些数据集的吧。
## 所需惟数据
在构建数据集时,我们广泛参考了 DeepSeek Math 和其他学者的方法,并对它们进行了大幅扩展。我们生成了含数十万 _数学题 - 解答_ 对的微调数据集,涵盖从高中数学到竞赛级数学的各种知识点。接下来的几周,我们会将该数据集完全开源。同时,我们还可能会用更大的模型来检查我们攻略的可扩展性。有关数据集构建的详细信息,请参阅我们即将发布的数据集技术报告。
具体到这次进步奖,我们为此构建了两个数据集以微调模型。
### 思维链
该数据集由数十万个题目组成,每题都有以思维链的方式编写的解答。数据集的来源范围有中国高中数学练习以及美国及国际数学奥林匹克竞赛题目。数据主要来自在线试卷 PDF 和数学论坛。
处理步骤如下:
1. 对原始 PDF 进行 OCR。
2. 分割为“题目 - 解答”对。
3. 翻译成英文。
4. 重新调整以变成思维链推理格式。
5. 格式化为最终答案。
### 工具整合推理
工具整合推理 (TIR) 在本次比赛中发挥了至关重要的作用。然而,收集和标注此类数据既昂贵又耗时。为了解决这个问题,我们从 Numina 数据集中选择了大约 6 万道题,重点关注那些答案为数字的题,其中大多数答案是整数。
然后,我们利用 GPT-4 的流水线生成类似 TORA 的推理路径,执行代码并生成结果,直到生成完整解答。我们筛选出最终答案与参考答案不匹配的解答,并重复此过程三次,以确保准确性和一致性。这种迭代方法使我们能够高效地生成高质量的 TORA 数据。
作为参考,以下是我们训得的第 1 阶段模型 **NuminaMath-7B-CoT** 和第 2 阶段模型 **NuminaMath-7B-TIR** [在 **MATH 基准**](https://arxiv.org/abs/2103.03874) 上与其他开放及私有模型的跑分对比:
| 模型 | MATH (%) |
|--------------------------|--------------------------------|
| | **思维链推理** |
| GPT-4 (2023) | 42.5 |
| GPT-4o | 76.6 |
| Claude 3.5 Sonnet | 71.1 |
| DeepSeekMath-7B-Instruct | 46.8 |
| DeepSeekMath-7B-RL | 51.7 |
| NuminaMath-7B-CoT | 56.3 |
| | **工具整合推理** |
| DeepSeekMath-7B-Instruct | 57.4 |
| DeepSeekMath-7B-RL | 58.8 |
| NuminaMath-7B-TIR | 68.2 |
_各模型在 MATH 基准上的表现。除非明确说明,所有跑分均由零样本贪心解码获得。_
## 通过自一致性工具整合推理 (SC-TIR) 来抑制高波动
正如其他参赛者指出的那样,本次比赛在模型提交和评估方面都带来了一些挑战:
- 评估 API 以随机顺序出题,因此提前停止等策略会产生较高的波动,因为可能一开始就会遇到很多难题,这就导致留给剩余部分的时间就不多了 (反之亦然)。
- LLM 推理中的大多数创新都是基于最新的 GPU 的,因此 `Flash Attention 2` 或 `torch.compile` 等标准方法不适用于 T4 GPU。同样,老 GPU 并不支持 bfloat16 等新数据类型,这促使我们探索 AWQ 和 GPTQ 等训后量化方法。
最初,我们使用 [**Abdur Rafae**](https://www.kaggle.com/abdurrafae) 的 [**公开笔记本**](https://www.kaggle.com/code/abdurrafae/improved-code-interpretation) 来提交,但发现高波动是个大问题。为了解决这个问题,我们采取了一种基于工具整合推理的新方法:

1. 将每道题复制 N 次以生成 vLLM 的一个 batch。N 可以看成多数投票时的候选数量。
2. 对这 N 个输入进行采样解码,直至生成完整的 Python 代码块。
3. 执行每个 Python 代码块并将其输出串接在代码后面,包括栈回溯 (如有)。
4. 重复 M 次以生成 N 个、深度为 M 的生成,允许模型使用栈回溯自纠正代码错误。如果某个样本无法生成合理的输出 (如,生成了不完整的代码块),就删除之。
5. 对候选解答进行后处理,并使用多数投票来选择最终答案。
我们获胜的提交使用的 `N=48,M=4` 。因为增加任一参数的数值并不会提高性能,所以我们就选择了这两个最小值以保证满足时间限制。实际上,该算法通过工具整合推理增强了 [**CoT 的自一致性**](https://arxiv.org/abs/2305.10601) (如下所示)。

我们发现,我们的 SC-TIR 算法产生了更稳健的结果,并且显著降低了在内部评估集和公开排行榜上的波动。
值得一提的一个技术细节是,我们发现以 8 比特精度量化模型很有用。原因有三:
- 将模型上传到 Kaggle Hub 非常慢,压缩模型使上传速度提高了一倍。
- T4 GPU 不支持 bfloat16,转换为 float16 会导致模型性能下降。又没法转换为 float32,因为超出了 GPU 可用内存。
- 此外,16 位模型仅用于加载权重就需消耗约 32GB VRAM。对于 2xT4,需要使能 KV 缓存才能快速运行,我们发现在模型精度和速度之间进行折衷是有益的。
我们使用 [**AutoGPTQ**](https://github.com/AutoGPTQ/AutoGPTQ) 以及一个用于校准的数据集来量化我们的模型。在实践中,这会导致准确性小幅下降,但提供了最佳折衷方案,以适配 Kaggle 平台对模型评估所施加的限制。
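下面给出一个量化步骤的示意 (并非提交时的原样脚本,校准样本与保存路径仅为演示假设):
```python
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
from transformers import AutoTokenizer

model_id = "AI-MO/NuminaMath-7B-TIR"
tokenizer = AutoTokenizer.from_pretrained(model_id)

quantize_config = BaseQuantizeConfig(bits=8, group_size=128, desc_act=False)
model = AutoGPTQForCausalLM.from_pretrained(model_id, quantize_config)

# 校准数据: 少量有代表性的数学题文本,此处仅为占位示例
calibration = [tokenizer("Solve for x: 2x + 3 = 7", return_tensors="pt")]

model.quantize(calibration)
model.save_quantized("numinamath-7b-tir-gptq-8bit", use_safetensors=True)
```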
### 避免过拟合诅咒
过拟合公开排行榜是 Kaggle 竞赛中的常见风险,当测试集只有 50 道题时更是如此。此外,规则允许每天最多提交两次,这使得强大的内部验证数据集对于我们的开发节奏至关重要。根据 AIMO 团队的规定,测试题的难度为中等,介于 AMC12 和 AIME 级别之间,且每题答案为整数。
为了指导模型选择,我们使用了四个内部验证集来衡量模型在不同难度的数学题上的性能。为了避免基础模型中潜在的数据污染,我们从 AMC12 (2022、2023) 和 AIME (2022、2023、2024) 中选择题目以创建两个内部验证数据集:
- **AMC (83 道题):** 我们选择了 [**AMC12**](https://artofproblemsolving.com/wiki/index.php/AMC_12_Problems_and_Solutions) 22、AMC12 23 的所有题目,并保留了那些结果为整数的题目。最终生成的数据集包含 83 道题。该验证集旨在模拟 Kaggle 上的私有测试集,因为我们从竞赛描述中知道题目难度大于等于这个级别。我们发现我们的模型可以解答大约 60-65% 的题目。为了测量波动,每次评估时,我们使用 5-10 个不同的种子,使用我们的 SC-TIR 算法通常会看到大约 1-3% 的波动。
- **AIME (90 道题):** 我们选择了 [**AIME 22**](https://artofproblemsolving.com/wiki/index.php/2022_AIME_I)、[**AIME 23**](https://artofproblemsolving.com/wiki/index.php/2023_AIME_I_Problems) 以及 [**AIME 24**](https://artofproblemsolving.com/wiki/index.php/2024_AIME_I) 的所有题目来度量我们模型解决难题的表现如何,并观测最常见的错误模式。同上,每次评估,我们使用 5-10 个种子进行以测量波动。
由于 AMC/AIME 验证集规模较小,与公开排行榜类似,这些数据集上的模型性能容易受噪声的影响。为了更好地评估模型的性能,我们还使用 MATH 测试集的子集 (含 5,000 道题) 对其进行了评估。我们仅保留答案为整数的题目,以简化多数投票并模拟奥赛评估。因此,我们又多了两个验证集:
- **MATH 4 级 (754 道题)**
- **MATH 5 级 (721 道题)**
通过使用这四个验证集,我们能够在不同的训练阶段选择最有潜力的模型,并缩小超参的选择范围。我们发现,对本 AIMO 赛程而言,将小型但具代表性的验证集与较大的验证集相结合是有用的,因为每个提交都受到抽样随机性的影响。
## 我们尝试过的其他点子
上文有提及,我们在过程中还尝试了一些其他方法,但最终放弃,转而采用 MuMath-Code 的方法。我们尝试过的方法有:
- 训练纯 CoT 模型并使用多数投票进行评估
- 训练 MMOS 模型以通过 Python 一步解决问题
我们还试过对 SFT 模型生成的补全应用 [**Kahneman-Tversky Optimization (KTO)**](https://arxiv.org/abs/2402.01306),具体想法有点类似于 [**OrcaMath**](https://arxiv.org/abs/2402.14830),即:
- 交织使用推理和代码执行,每道题用 SFT 模型采样出 4 个补全。我们使用第 2 阶段的 SFT 数据集作为提示。
- 提取答案并将其与标注答案进行比较。如果正确,则将样本标记为正,否则标记为负。
- 在此数据集上对 SFT 模型应用 KTO。
我们发现这种形式的同策 KTO 生成的模型比 SFT 模型稍好 (内部评估好几个百分点),在公开排行榜上得分为 27/50。
KTO 的一个很好的功能是,你可以在训练期间跟踪隐式奖励,这确实有助于调试 - 如,下图展示了我们成功的训练日志之一,其中人们可以看到正确答案的奖励随着训练而增加,而错误答案的奖励则被抑制。

但,由于时间关系,我们最终没有将此方法应用于最终的 SFT 模型。如果我们做了的话,可能还能多做对 1-2 道题!
我们还尝试将我们的 SFT 攻略应用于 InternLM-20B、CodeLlama-33B 和 Mixtral-8x7B 等更大的模型,但发现 (a) DeepSeek 7B 模型由于已在数学上进行过增量预训练而很难被击败,且 (b) 在 2xT4 GPU 上推理速度非常慢,并且我们遇到了许多神秘的超时,但我们无法分析到其根因。
还有一个失败的实验是尝试将强化学习 (特别是 PPO 算法及 [**Reinforce-Leave-One-Out (RLOO) 算法**](https://arxiv.org/abs/2402.14740)) 和代码执行反馈结合起来以生成对编写代码及获得正确/错误解答的奖励。我们将其应用于 `DeepSeekMath 7B RL` 模型。虽然我们看到了一些很不错的奖励曲线,但我们没有看到性能有任何显著的提升。鉴于像 RLOO 这样的在线方法受限于文本生成的性能并且迭代缓慢,我们放弃了强化学习,转而尝试 KTO。

在推理方面,我们也进行了如下实验:
- 使用静态 KV 缓存和 torch 编译。我们发现我们能够在 H100 上将原生 `transformers` 代码的生成速度加快 2-3 倍,但在 Kaggle T4 上会遇到各种神秘错误,主要是由于 `accelerate` 中的 torch 编译缺乏对模型分片的支持。
- 各种模型合并技术,例如 [**DARE**](https://arxiv.org/abs/2311.03099)、[**TIES**](https://arxiv.org/abs/2306.01708) 以及 [**WARP**](https://arxiv.org/abs/2406.16768v1)。这里我们使用 [**mergekit**](https://github.com/arcee-ai/mergekit) 来合并 SFT 和 KTO 模型,或者将 SFT 模型与公开的 `DeepSeekMath` 模型合并。总的来说,我们发现这些合并导致我们的内部评估出现重大倒退,并且我们没有时间对此进行更深入探索。
## Numina 的未来 - 寻求贡献者和合作伙伴!
继 Numina 初步成功赢得 AIMO 2024 进步奖之后,我们的目标变得更为宏大,即肩负促进数学领域人工智能和人类智能发展的使命。你可以访问我们的网站,了解有关我们项目的更多信息,请随时通过 [**[email protected]**](mailto:[email protected]) 给我们留言。
Numina 旨在向世界各地愿意通过人工智能进一步推动数学发展的人才和支持者开放,保持数学的开放本质!
## 致谢
我们感谢 Thomas Wolf 和 Leandro von Werra 促成了 Numina 和 Hugging Face 的合作。我们还感谢 Hugo Larcher 在我们使用 Hugging Face GPU 集群的过程中提供的帮助,Colin Raffel 对模型合并方法的建议,以及 Omar Sanseviero 对博文的反馈。
我们还想感谢 [**Mistral.ai**](https://mistral.ai)、[**General Catalyst**](https://www.generalcatalyst.com/)、[**Answer.AI**](https://answerai.pro) 以及 [**北京大学北京国际数学研究中心**](https://bicmr.pku.edu.cn/) 自项目伊始的支持。
最后,我们感谢 AIMO 团队发起了如此令人激动、鼓舞人心的比赛! | 3 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/gemma.md | ---
title: "欢迎 Gemma: Google 最新推出开放大语言模型"
thumbnail: /blog/assets/gemma/thumbnail.jpg
authors:
- user: philschmid
- user: osanseviero
- user: pcuenq
translators:
- user: chenglu
---
# 欢迎 Gemma: Google 最新推出开放大语言模型
今天,Google 发布了一系列最新的开放式大型语言模型 —— Gemma!Google 正在加强其对开源人工智能的支持,我们也非常有幸能够帮助全力支持这次发布,并与 Hugging Face 生态完美集成。
Gemma 提供两种规模的模型:7B 参数模型,针对消费级 GPU 和 TPU 设计,确保高效部署和开发;2B 参数模型则适用于 CPU 和移动设备。每种规模的模型都包含基础版本和经过指令调优的版本。
我们与 Google 紧密合作,确保 Gemma 能够无缝集成到 Hugging Face 的生态系统中。在 Hub 上,你可以找到这四个公开可访问的模型(包括两个基础模型和两个经过调优的模型)。此次发布的亮点包括:
- [Hub 上的模型](https://huggingface.co/models?search=google/gemma),包括模型说明和授权信息
- [🤗 Transformers 的集成](https://github.com/huggingface/transformers/releases/tag/v4.38.0)
- 与 Google Cloud 的深度集成
- 与推理端点 (Inference Endpoints) 的集成
- 使用 🤗 TRL 在单个 GPU 上对 Gemma 进行微调的示例
## 目录
- [Gemma 是什么?](#what-is-gemma)
- [提示格式](#prompt-format)
- [探索未知](#exploring-the-unknowns)
- [演示](#demo)
- [使用 🤗 Transformers](#using-🤗-transformers)
- [JAX 权重](#jax-weights)
- [与 Google Cloud 的集成](#integration-with-google-cloud)
- [与推理端点的集成](#integration-with-inference-endpoints)
- [使用 🤗 TRL 进行微调](#fine-tuning-with-🤗-trl)
- [额外资源](#additional-resources)
- [致谢](#acknowledgments)
## Gemma 是什么?
Gemma 是 Google 基于 Gemini 技术推出的四款新型大型语言模型(LLM),提供了 2B 和 7B 两种不同规模的版本,每种都包含了预训练基础版本和经过指令优化的版本。所有版本均可在各类消费级硬件上运行,无需数据量化处理,拥有高达 8K tokens 的处理能力:
- [gemma-7b](https://huggingface.co/google/gemma-7b):7B 参数的基础模型。
- [gemma-7b-it](https://huggingface.co/google/gemma-7b-it):7B 参数的指令优化版本。
- [gemma-2b](https://huggingface.co/google/gemma-2b):2B 参数的基础模型。
- [gemma-2b-it](https://huggingface.co/google/gemma-2b-it):2B 参数的指令优化版本。
<div class="flex items-center justify-center">
<img src="/blog/assets/gemma/Gemma-logo-small.png" alt="Gemma logo">
</div>
Gemma 模型的性能如何?以下是其基础版本与其他开放模型在 [LLM 排行榜](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) 上的比较(得分越高越好):
| 模型 | 许可证 | 商业使用 | 预训练大小 [tokens] | 排行榜分数 ⬇️ |
| -------------------------------------------------------------------------------- | --------------- | --------------- | ------------------------- | -------------------- |
| [LLama 2 70B Chat (参考)](https://huggingface.co/meta-llama/Llama-2-70b-hf) | Llama 2 许可证 | ✅ | 2T | 67.87 |
| [Gemma-7B](https://huggingface.co/google/gemma-7b) | Gemma 许可证 | ✅ | 6T | 63.75 |
| [DeciLM-7B](https://huggingface.co/Deci/DeciLM-7B) | Apache 2.0 | ✅ | 未知 | 61.55 |
| [PHI-2 (2.7B)](https://huggingface.co/microsoft/phi-2) | MIT | ✅ | 1.4T | 61.33 |
| [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) | Apache 2.0 | ✅ | 未知 | 60.97 |
| [Llama 2 7B](https://huggingface.co/meta-llama/Llama-2-7b-hf) | Llama 2 许可证 | ✅ | 2T | 54.32 |
| [Gemma 2B](https://huggingface.co/google/gemma-2b) | Gemma 许可证 | ✅ | 2T | 46.51 |
在 7B 参数级别,Gemma 表现出色,与市场上最佳模型如 Mistral 7B 不相上下。而 2B 版本的 Gemma 虽然规模较小,但在其类别中的表现也颇具竞争力,尽管在排行榜上的得分并未超越类似规模的顶尖模型,例如 Phi 2。我们期待社区对这些模型的真实使用反馈,以进一步优化和调整。
需要浅浅再强调一下:LLM 排行榜特别适用于衡量预训练模型的质量,而不太适用于聊天模型。我们鼓励对聊天模型运行其他基准测试,如 MT Bench、EQ Bench 和 lmsys Arena。
### Prompt 提示词格式
Gemma 的基础模型不限定特定的提示格式。如同其他基础模型,它们能够根据输入序列生成一个合理的续接内容,适用于零样本或少样本的推理任务。这些模型也为针对特定应用场景的微调提供了坚实的基础。指令优化版本则采用了一种极其简洁的对话结构:
```xml
<start_of_turn>user
knock knock<end_of_turn>
<start_of_turn>model
who is there<end_of_turn>
<start_of_turn>user
LaMDA<end_of_turn>
<start_of_turn>model
LaMDA who?<end_of_turn>
```
要有效利用这一格式,必须严格按照上述结构进行对话。我们将演示如何利用 `transformers` 库中提供的聊天模板简化这一过程。
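例如,下面这段示意代码 (对话内容沿用上文示例) 借助分词器自带的聊天模板自动生成该格式,而无需手写控制词元:
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b-it")

chat = [
    {"role": "user", "content": "knock knock"},
    {"role": "assistant", "content": "who is there"},
    {"role": "user", "content": "LaMDA"},
]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
print(prompt)  # 输出即为带 <start_of_turn>/<end_of_turn> 的格式化提示
```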
### 探索未知领域
尽管技术报告提供了关于基础模型训练和评估过程的信息,但关于数据集构成和预处理的具体细节则较为欠缺。据悉,这些模型是基于来自互联网文档、编程代码和数学文本等多种数据源训练而成,经过严格筛选,以排除含有敏感信息和不适内容的数据。
对于 Gemma 的指令优化模型,关于微调数据集以及与监督微调(SFT)和 [基于人类反馈的强化学习(RLHF)](https://huggingface.co/blog/rlhf)相关的超参数设置,细节同样未公开。
## 演示
现在,你可以在 Hugging Chat 上体验与 Gemma 指令模型的互动对话!点击此处访问:[https://huggingface.co/chat?model=google/gemma-7b-it](https://huggingface.co/chat?model=google/gemma-7b-it)
### 使用 🤗 Transformers
借助 Transformers 的 [4.38 版本](https://github.com/huggingface/transformers/releases/tag/v4.38.0),你可以轻松地使用 Gemma 模型,并充分利用 Hugging Face 生态系统内的工具,包括:
- 训练和推理脚本及示例
- 安全文件格式(`safetensors`)
- 集成了诸如 bitsandbytes(4位量化)、PEFT(参数效率微调)和 Flash Attention 2 等工具
- 辅助工具和帮助器,以便使用模型进行生成
- 导出模型以便部署的机制
另外,Gemma 模型支持 `torch.compile()` 与 CUDA 图的结合使用,在推理时可实现约 4 倍的速度提升!
确保你使用的是最新版本的 `transformers`:
```bash
pip install --upgrade transformers
```
以下代码片段展示了如何结合 transformers 使用 `gemma-7b-it`。运行此代码需大约 18 GB 的 RAM,适用于包括 3090 或 4090 在内的消费级 GPU。
```python
from transformers import pipeline
import torch
pipe = pipeline(
"text-generation",
model="google/gemma-7b-it",
model_kwargs={"torch_dtype": torch.bfloat16},
device="cuda",
)
messages = [
{"role": "user", "content": "Who are you? Please, answer in pirate-speak."},
]
outputs = pipe(
messages,
max_new_tokens=256,
do_sample=True,
temperature=0.7,
top_k=50,
top_p=0.95
)
assistant_response = outputs[0]["generated_text"][-1]["content"]
print(assistant_response)
```
> Avast me, me hearty. I am a pirate of the high seas, ready to pillage and plunder. Prepare for a tale of adventure and booty!
>
- 我们使用了 `bfloat16` 数据类型进行模型推理,该数据类型是所有评估中使用的参考精度。如果你的硬件支持,使用 `float16` 可能会更快。
- 你还可以将模型自动量化,以 8 位或 4 位模式加载。以 4 位模式加载模型大约需要 9 GB 的内存,使其适用于多种消费级显卡,包括 Google Colab 上的所有 GPU。以下是以 4 位加载生成 pipeline 的方法:
```python
pipe = pipeline(
    "text-generation",
    model="google/gemma-7b-it",
    model_kwargs={
        "torch_dtype": torch.float16,
        "quantization_config": {"load_in_4bit": True}
    },
)
```
更多关于如何使用 transformers 和模型的详情,请参阅 [模型卡片](https://huggingface.co/google/gemma-7b)。
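前文提到 Gemma 支持将 `torch.compile()` 与静态 KV 缓存结合使用来加速推理,下面是一段简短的示意代码 (API 细节请以模型卡片和 transformers 文档为准):
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", torch_dtype=torch.bfloat16).to("cuda")

model.generation_config.cache_implementation = "static"  # 启用静态 KV 缓存
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)

inputs = tokenizer("The capital of France is", return_tensors="pt").to("cuda")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```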
### JAX 权重
所有 Gemma 模型变种都可以用 PyTorch 或 JAX / Flax 使用。若要加载 Flax 权重,你需要按照以下方式使用仓库中的 `flax` 修订版本:
```python
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxGemmaForCausalLM
model_id = "google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.padding_side = "left"
model, params = FlaxGemmaForCausalLM.from_pretrained(
model_id,
dtype=jnp.bfloat16,
revision="flax",
_do_init=False,
)
inputs = tokenizer("Valencia and Málaga are", return_tensors="np", padding=True)
output = model.generate(inputs, params=params, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output.sequences, skip_special_tokens=True)
```
> `['Valencia and Málaga are two of the most popular tourist destinations in Spain. Both cities boast a rich history, vibrant culture,']`
>
如果你在 TPU 或多个 GPU 设备上运行,可以利用 `jit` 和 `pmap` 来编译和并行执行推理任务。
## 与 Google Cloud 集成
你可以通过 Vertex AI 或 Google Kubernetes Engine (GKE) 在 Google Cloud 上部署和训练 Gemma,利用 [文本生成推理](https://huggingface.co/docs/text-generation-inference/index) 和 Transformers 实现。
要从 Hugging Face 部署 Gemma 模型,请访问[模型页面](https://huggingface.co/google/gemma-7b-it)并点击[部署 -> Google Cloud](https://huggingface.co/google/gemma-7b-it)。这将引导你进入 Google Cloud Console,在那里你可以通过 Vertex AI 或 GKE 一键部署 Gemma。文本生成推理为 Gemma 在 Google Cloud 上的部署提供支持,这是我们[与 Google Cloud 合作伙伴关系的初步成果](https://huggingface.co/blog/gcp-partnership)。

你也可以通过 Vertex AI Model Garden 直接访问 Gemma。
要在 Hugging Face 上微调 Gemma 模型,请访问 [模型页面](https://huggingface.co/google/gemma-7b-it) 并点击 [训练 -> Google Cloud](https://huggingface.co/google/gemma-7b-it)。这将引导你进入 Google Cloud Console,在那里你可以在 Vertex AI 或 GKE 上访问笔记本,以在这些平台上微调 Gemma。

这些集成是我们 [与 Google 合作伙伴关系成果的一部分](https://huggingface.co/blog/gcp-partnership),未来还会有更多精彩内容发布,敬请期待!
## 与推理端点集成
你可以在 Hugging Face 的 [推理端点](https://ui.endpoints.huggingface.co/new?repository=google%2Fgemma-7b-it) 上部署 Gemma,该端点使用文本生成推理作为后端。[文本生成推理](https://github.com/huggingface/text-generation-inference) 是由 Hugging Face 开发的可用于生产环境的推理容器,旨在简化大型语言模型的部署。它支持连续批处理、令牌流式传输、多 GPU 张量并行加速推理,并提供生产就绪的日志记录和跟踪功能。
要部署 Gemma 模型,请访问 HF Hub [模型页面](https://huggingface.co/google/gemma-7b-it) 并点击 [部署 -> 推理端点](https://ui.endpoints.huggingface.co/new?repository=google%2Fgemma-7b-it)。有关 [使用 Hugging Face 推理端点部署 LLM](https://huggingface.co/blog/inference-endpoints-llm)的更多信息,请参阅我们之前的博客文章。推理端点通过文本生成推理支持 [消息 API](https://huggingface.co/blog/tgi-messages-api),使你可以通过简单地更换 URL 从其他封闭模型切换到开放模型。
```python
from openai import OpenAI
# initialize the client but point it to TGI
client = OpenAI(
base_url="<ENDPOINT_URL>" + "/v1/", # replace with your endpoint url
api_key="<HF_API_TOKEN>", # replace with your token
)
chat_completion = client.chat.completions.create(
model="tgi",
messages=[
{"role": "user", "content": "Why is open-source software important?"},
],
stream=True,
max_tokens=500
)
# iterate and print stream
for message in chat_completion:
print(message.choices[0].delta.content, end="")
```
## 使用 🤗 TRL 进行微调
在消费级 GPU 上训练大型语言模型既是技术上的挑战,也是计算上的挑战。本节将介绍 Hugging Face 生态系统中可用的工具,这些工具可以帮助你高效地在消费级 GPU 上训练 Gemma。
一个微调 Gemma 的示例命令如下。我们利用 4 位量化和 QLoRA(一种参数效率微调技术)来减少内存使用,目标是所有注意力块的线性层。值得注意的是,与密集型 Transformer 不同,MLP 层(多层感知器层)因其稀疏性不适合与 PEFT(参数效率微调)技术结合使用。
首先,安装 🤗 TRL 的最新版本并克隆仓库以获取 [训练脚本](https://github.com/huggingface/trl/blob/main/examples/scripts/sft.py):
```bash
pip install -U transformers trl peft bitsandbytes
git clone https://github.com/huggingface/trl
cd trl
```
然后运行脚本:
```bash
accelerate launch --config_file examples/accelerate_configs/multi_gpu.yaml --num_processes=1 \
examples/scripts/sft.py \
--model_name google/gemma-7b \
--dataset_name OpenAssistant/oasst_top1_2023-08-25 \
--per_device_train_batch_size 2 \
--gradient_accumulation_steps 1 \
--learning_rate 2e-4 \
--save_steps 20_000 \
--use_peft \
--lora_r 16 --lora_alpha 32 \
--lora_target_modules q_proj k_proj v_proj o_proj \
--load_in_4bit \
--output_dir gemma-finetuned-openassistant
```
在单个 A10G GPU 上,这个训练过程大约需要 9 小时。通过调整 `--num_processes` 参数为你可用的 GPU 数量,可以实现并行化训练,从而缩短训练时间。
## 额外资源
- [Hub 上的模型](https://huggingface.co/models?other=gemma)
- 开放 LLM [排行榜](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
- [Hugging Chat 上的聊天演示](https://huggingface.co/chat?model=google/gemma-7b-it)
- [Gemma 官方博客](https://blog.google/technology/developers/gemma-open-models/)
- [Gemma 产品页面](https://ai.google.dev/gemma)
- [Vertex AI 模型花园链接](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335)
- Google Notebook 教程
## 致谢
此次发布和在生态系统中的集成是由包括 [Clémentine](https://huggingface.co/clefourrier)、[Eleuther 评估工具](https://github.com/EleutherAI/lm-evaluation-harness)(LLM 评估)、[Olivier](https://huggingface.co/olivierdehaene)、[David](https://huggingface.co/drbh)(文本生成推理支持)、[Simon](https://huggingface.co/sbrandeis)(在 Hugging Face 上开发新的访问控制特性)、[Arthur](https://huggingface.co/ArthurZ)、[Younes](https://huggingface.co/ybelkada)、[Sanchit](https://huggingface.co/sanchit-gandhi)(将 Gemma 集成到 transformers 中)、[Morgan](https://huggingface.co/mfuntowicz)(将 Gemma 集成到 optimum-nvidia,即将推出)、[Nathan](https://huggingface.co/nsarrazin)、[Victor](https://huggingface.co/victor)、[Mishig](https://huggingface.co/mishig)(使 Gemma 在 Hugging Chat 上可用)等众多社区成员的共同努力而成。
我们特别感谢 Google 团队发布 Gemma 并使其在开源 AI 社区中可用,为推动开放式人工智能发展做出了重要贡献。
| 4 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/setfit-optimum-intel.md | ---
title: "在英特尔至强 CPU 上使用 🤗 Optimum Intel 实现超快 SetFit 推理"
thumbnail: /blog/assets/optimum_intel/intel_thumbnail.png
authors:
- user: danielkorat
guest: true
- user: tomaarsen
- user: orenpereg
guest: true
- user: moshew
guest: true
- user: echarlaix
- user: aprabh2
guest: true
translators:
- user: MatrixYao
---
# 在英特尔至强 CPU 上使用 🤗 Optimum Intel 实现超快 SetFit 推理
在标注数据稀缺的场景下,SetFit 是一个颇有前途的建模解决方案,其由 Hugging Face 与 [Intel 实验室](https://www.intel.com/content/www/us/en/research/overview.html) 以及 [UKP Lab](https://www.informatik.tu-darmstadt.de/ukp/ukp_home/index.en.jsp) 合作共同开发。作为一个高效的框架,SetFit 可用于对 [Sentence Transformers](https://sbert.net/) 模型进行少样本微调。
SetFit 仅需很少的标注数据就能达到较高的准确率,例如,在使用 3-示例提示时,SetFit [优于](https://arxiv.org/pdf/2311.06102.pdf) GPT-3.5;在使用 5-示例提示时,其在 Banking 77 金融意图数据集上的表现也优于使用 3-示例提示的 GPT-4。
与基于 LLM 的方法相比,SetFit 有两个独特的优势:
<p>🗣 <strong>无需提示或词-标签映射器</strong>:基于 LLM 的少样本上下文学习依赖于人工制作的提示,其对措辞比较敏感,且依赖用户的专业知识,因此效果比较脆弱。SetFit 直接从少量标注文本样本中生成丰富的嵌入,从而完全省去了提示。</p>
<p>🏎 <strong>训练速度快</strong>:SetFit 不依赖 GPT-3.5 或 Llama2 等 LLM 来实现高准确率。因此,训练和推理速度通常要快一个数量级(或更多)。</p>
有关 SetFit 的更多详情,请参阅:[论文](https://arxiv.org/abs/2209.11055)、[博客](https://huggingface.co/blog/setfit)、[代码](https://github.com/huggingface/setfit)及相关[数据](https://huggingface.co/SetFit)。
Setfit 已被 AI 开发者社区广泛采用,每月下载量约为 10 万次,Hub 上的 SetFit 模型已有 [1600](https://huggingface.co/models?library=setfit) 个之多,且平均日增量约为 4 个左右。
## 加速!
本文,我们将解释如何用 🤗 [Optimum Intel](https://github.com/huggingface/optimum-intel) 优化你的 SetFit 模型,从而在英特尔 CPU 上实现 **7.8x** 的推理加速。我们还将展示如何轻松对模型进行训后量化,从而实现巨大的吞吐增益。有了这些技术,用户可在英特尔至强 CPU 上部署生产级的 SetFit 解决方案。
[Optimum Intel](https://github.com/huggingface/optimum-intel) 是一个开源库,可在英特尔硬件上对由 Hugging Face 库构建的端到端流水线进行加速。 Optimum Intel 实现了多种模型加速技术,如低比特量化、模型权重剪枝、蒸馏以及运行时加速。
[Optimum Intel](https://github.com/huggingface/optimum-intel) 的运行时及各种优化都充分利用了英特尔® AVX-512、矢量神经网络指令 (VNNI) 以及最新的英特尔® 先进矩阵扩展(英特尔® AMX)以对模型进行极致加速。具体来说,英特尔在每个 CPU 核中都内置了 [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) (bf16) 和 int8 GEMM 加速器,以加速深度学习训练和推理工作负载。尤其值得一提的是,PyTorch 2.0 和 [Intel Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch) (IPEX) 中加入了 AMX 优化以进一步加速推理及训练。
使用 Optimum Intel 可以轻松对各种预训练模型进行加速,你可在[此处](https://huggingface.co/docs/optimum/main/en/intel/optimization_inc)找到很多例子。本文也附有一个 [notebook 版](https://github.com/huggingface/setfit/blob/main/notebooks/setfit-optimum-intel.ipynb),可供大家逐步演练。
## 第 1 步:使用 🤗 Optimum Intel 量化 SetFit 模型
在对 SetFit 模型进行优化时,我们会使用[英特尔神经压缩器](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) (INC) 对模型进行量化,其已集成入 Optimum Intel。
**量化**是一种非常流行的深度学习模型优化技术,可用于提高推理速度。它通过将一组高精度数值转换为较低位宽的数据类型(如 INT8),从而最大限度地降低神经网络的权重和/或激活所需的位数。另外,由于位宽较低,其计算速度也可能会更快。
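为了直观理解量化的原理,下面给出一个与 Optimum Intel 实现无关的、极简的对称 int8 量化草图(仅作示意):
```python
import numpy as np

def quantize_int8(x):
    # 对称量化:用最大绝对值确定缩放因子,把 float32 数值映射到 [-127, 127] 的 int8
    scale = np.abs(x).max() / 127.0
    q = np.clip(np.round(x / scale), -127, 127).astype(np.int8)
    return q, scale

def dequantize(q, scale):
    # 反量化:乘回缩放因子,得到近似的原始数值
    return q.astype(np.float32) * scale

w = np.random.randn(4, 4).astype(np.float32)
q, scale = quantize_int8(w)
print("最大量化误差:", np.abs(w - dequantize(q, scale)).max())
```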
本文,我们将使用训后静态量化(PTQ)。PTQ 仅需少量未标注校准数据,无需任何训练即可在保持模型的准确性的同时减低推理时的内存占用并降低延迟。首先请确保你已安装所有必要的库,同时确保 Optimum Intel 版本至少为 `1.14.0`(因为 PTQ 功能是从该版本开始引入的):
```bash
pip install --upgrade-strategy eager optimum[ipex]
```
### 准备校准数据集
校准数据集应能在数据分布上较好代表未见数据。一般来说,准备 100 个样本就足够了。在本例中,我们使用的是 `rotten_tomatoes` 数据集,其是一个电影评论数据集,与我们的目标数据集 `sst2` 类似。
首先,我们从该数据集中随机加载 100 个样本。然后,为了准备量化数据集,我们需要对每个样本进行分词。由于不再需要 `text` 和 `label` 列,因此将其删除。
```python
from datasets import load_dataset

calibration_set = load_dataset("rotten_tomatoes", split="train").shuffle(seed=42).select(range(100))
def tokenize(examples):
return tokenizer(examples["text"], padding="max_length", max_length=512, truncation=True)
tokenizer = setfit_model.model_body.tokenizer
calibration_set = calibration_set.map(tokenize, remove_columns=["text", "label"])
```
### 量化
量化前,先要配置所需的量化方案,本例中为**静态训后量化**,再使用 `optimum.intel` 在校准数据集上运行量化:
```python
from optimum.intel import INCQuantizer
from neural_compressor.config import PostTrainingQuantConfig
setfit_body = setfit_model.model_body[0].auto_model
quantizer = INCQuantizer.from_pretrained(setfit_body)
optimum_model_path = "/tmp/bge-small-en-v1.5_setfit-sst2-english_opt"
quantization_config = PostTrainingQuantConfig(approach="static", backend="ipex", domain="nlp")
quantizer.quantize(
quantization_config=quantization_config,
calibration_dataset=calibration_set,
save_directory=optimum_model_path,
batch_size=1,
)
tokenizer.save_pretrained(optimum_model_path)
```
就这样!现在,我们有了一个量化版的 SetFit 模型。下面,我们对其进行测试。
## 第 2 步:推理基准测试
我们在 [notebook](https://github.com/huggingface/setfit/blob/main/notebooks/setfit-optimum-intel.ipynb) 中写了一个 `PerformanceBenchmark` 类,用于计算模型延迟和吞吐量,并用于测量模型准确度。我们现在用它来对以下三种配置进行基准测试:
- 使用 `PyTorch` 和 `🤗 Transformers` 库对 `fp32` 模型进行推理。
- 使用 [`Intel Extension for PyTorch`](https://github.com/intel/intel-extension-for-pytorch) (IPEX) 对模型进行 `bf16` 推理,同时使用 `TorchScript` 对模型进行图优化。
- 使用 `Optimum Intel` 对 `int8` 量化模型进行推理。
加载测试数据集 `sst2`,并使用 PyTorch 和 🤗 Transformers 库运行基准测试:
```python
from datasets import load_dataset
from setfit import SetFitModel
test_dataset = load_dataset("SetFit/sst2")["validation"]
model_path = "dkorat/bge-small-en-v1.5_setfit-sst2-english"
setfit_model = SetFitModel.from_pretrained(model_path)
pb = PerformanceBenchmark(
model=setfit_model,
dataset=test_dataset,
optim_type="bge-small (transformers)",
)
perf_metrics = pb.run_benchmark()
```
第二个基准测试,我们将使用 bf16 精度和 TorchScript 两种优化手段,并使用 [IPEX](https://github.com/intel/intel-extension-for-pytorch) 优化库。要使用 IPEX,我们只需导入 IPEX 库并对模型应用 `ipex.optimize()`,在本例中,目标模型是 SetFit 的模型体:
```python
import torch
import intel_extension_for_pytorch as ipex

dtype = torch.bfloat16
body = ipex.optimize(setfit_model.model_body, dtype=dtype)
```
使用 `TorchScript` 进行图优化时,我们根据模型的最大输入长度生成随机序列,并从分词器的词汇表中采样词汇:
```python
tokenizer = setfit_model.model_body.tokenizer
d = generate_random_sequences(batch_size=1, length=tokenizer.model_max_length, vocab_size=tokenizer.vocab_size)
body = torch.jit.trace(body, (d,), check_trace=False, strict=False)
setfit_model.model_body = torch.jit.freeze(body)
```
最后,我们对量化的 Optimum 模型运行基准测试。我们首先定义一个 SetFit 模型的包装类,该包装类在推理时会自动插入量化模型体(而不是原始模型体)。然后,我们用这个包装类跑基准测试。
```python
from optimum.intel import IPEXModel
class OptimumSetFitModel:
def __init__(self, setfit_model, model_body):
model_body.tokenizer = setfit_model.model_body.tokenizer
self.model_body = model_body
self.model_head = setfit_model.model_head
optimum_model = IPEXModel.from_pretrained(optimum_model_path)
optimum_setfit_model = OptimumSetFitModel(setfit_model, model_body=optimum_model)
pb = PerformanceBenchmark(
model=optimum_setfit_model,
dataset=test_dataset,
optim_type=f"bge-small (optimum-int8)",
model_path=optimum_model_path,
autocast_dtype=torch.bfloat16,
)
perf_metrics.update(pb.run_benchmark())
```
## 结果
<p align="center">
<img src="assets/178_setfit_optimum_intel/latency.png" width=500>
</p>
<p align="center">
<em>精度与延迟关系图(batch size=1)</em>
</p>
| | bge-small (transformers) | bge-small (ipex-bfloat16) | bge-small (optimum-int8) |
|----------------------|---------------------|---------------------------|---------------------------|
| 模型大小 | 127.32 MB | 63.74 MB | 44.65 MB |
| 测试集准确率 | 88.4% | 88.4% | 88.1% |
| 延迟 (bs=1) | 15.69 +/- 0.57 ms | 5.67 +/- 0.66 ms | 4.55 +/- 0.25 ms |
batch size 为 1 时,我们的优化模型将延迟降低了 **3.45 倍**。请注意,此时准确率几乎没有下降!另外值得一提的是,模型大小缩小了 **2.85x**。
<p align="center">
<img src="assets/178_setfit_optimum_intel/throughput.png" width=500>
</p>
我们将焦点转向不同 batch size 下的吞吐量。这里,我们获得了更大的加速。如果比较最高吞吐量(不限 batch size),优化后的模型**比原始 transformers fp32 模型高 7.8 倍!**
## 总结
本文,我们展示了如何使用 🤗 Optimum Intel 中的量化功能来优化 SetFit 模型。在轻松快速地对模型完成训后量化后,我们观察到在准确度损失很小的情况下,推理吞吐量增加了 **7.8 倍**。用户可以使用这种优化方法在英特尔至强 CPU 上轻松部署任何现有 SetFit 模型。
## 参考文献
* Lewis Tunstall, Nils Reimers, Unso Eun Seo Jo, Luke Bates, Daniel Korat, Moshe Wasserblat, Oren Pereg, 2022. "Efficient Few-Shot Learning Without Prompts". https://arxiv.org/abs/2209.11055
| 5 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/reformer.md | ---
title: "Reformer 模型 - 突破语言建模的极限"
thumbnail: /blog/assets/03_reformer/thumbnail.png
authors:
- user: patrickvonplaten
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# Reformer 模型 - 突破语言建模的极限
<a href="https://colab.research.google.com/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt=" 在 Colab 中打开 "/></a>
## Reformer 如何在不到 8GB 的内存上训练 50 万个词元
[Kitaev、Kaiser 等人于 2020 年引入的 Reformer 模型](https://arxiv.org/pdf/2001.04451.pdf) 是迄今为止长序列建模领域内存效率最高的 transformer 模型之一。
最近,人们对长序列建模的兴趣激增,仅今年一年,就涌现出了大量的工作,如 [Beltagy 等人的工作 (2020) ](https://arxiv.org/abs/2004.05150)、[Roy 等人的工作 (2020) ](https://arxiv.org/abs/2003.05997)、[Tay 等人的工作](https://arxiv.org/abs/2002.11296) 以及 [Wang 等人的工作](https://arxiv.org/abs/2006.04768) 等等。长序列建模背后的动机是,NLP 中的许多任务 (例如 _摘要、问答_ ) 要求模型处理更长的序列,这些序列长度超出了 BERT 等模型的处理能力。在需要模型处理长输入序列的任务中,长序列模型无需对输入序列进行裁剪以避免内存溢出,因此已被证明优于标准的 **BERT 类模型** ( _见_ [Beltagy 等人 2020 年的工作](https://arxiv.org/abs/2004.05150))。
Reformer 能够一次处理多达 50 万个词元,从而突破了长序列建模的极限 (具体可参见本 [笔记本](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb))。相形之下,传统的 `bert-base-uncased` 模型最长仅支持 512 个词元。在 Reformer 中,标准 transformer 架构的每个部分都经过重新设计,以最小化内存需求,并避免显著降低性能。
内存的改进来自于 Reformer 作者向 transformer 世界引入的 **4** 大特性:
1. **Reformer 自注意力层** - _如何在不受限于本地上下文的情况下高效地实现自注意力机制?_
2. **分块前馈层** - _如何更好地对大型前馈层的时间和内存进行权衡?_
3. **可逆残差层** - _如何聪明地设计残差架构以大幅减少训练中的内存消耗?_
4. **轴向位置编码 (Axial Positional Encodings)** - _如何使位置编码可用于超长输入序列?_
本文的目的是 **深入** 阐述 Reformer 的上述四大特性。虽然这四个特性目前是用在 Reformer 上的,但其方法是通用的。因此,读者不应被此束缚,而应该多思考在哪些情况下可以把这四个特性中的某一个或某几个应用于其他的 transformer 模型,以解决其问题。
下文四个部分之间的联系很松散,因此可以单独阅读。
Reformer 已集成入 🤗Transformers 库。对于想使用 Reformer 的用户,建议大家阅读本文,以更好地了解该模型的工作原理以及如何正确配置它。文中所有公式都附有其在 transformers 中对应的 Reformer 配置项 ( _例如_ `config.<param_name>` ),以便读者可以快速关联到官方文档和配置文件。
**注意**: _轴向位置编码_ 在官方 Reformer 论文中没有解释,但在官方代码库中广泛使用。本文首次深入阐释了轴向位置编码。
## 1. Reformer 自注意力层
Reformer 使用了两种特殊的自注意力层: _局部_ 自注意力层和 LSH (Locality Sensitive Hashing,局部敏感哈希, _LSH_ ) 自注意力层。
在介绍新的自注意力层之前,我们先简要回顾一下传统的自注意力,其由 Vaswani 等人在其 [2017 年的论文](https://arxiv.org/abs/1706.03762) 中引入。
本文的符号及配色与 [《图解 transformer》](https://jalammar.github.io/illustrated-transformer/) 一文一致,因此强烈建议读者在阅读本文之前,先阅读《图解 transformer》一文。
**重要**: 虽然 Reformer 最初是为了因果自注意力而引入的,但它也可以很好地用于双向自注意力。本文在解释 Reformer 的自注意力时,将其用于 _双向_ 自注意力。
### 全局自注意力回顾
Transformer 模型的核心是 **自注意力** 层。现在,我们回顾一下传统的自注意力层,这里称为 **全局自注意力** 层。首先我们假设对嵌入向量序列 $\mathbf{X} = \mathbf{x}_1, \ldots, \mathbf{x}_n$ 执行一个 transformer 层,该序列中的每个向量 $\mathbf{x}_{i}$ 的维度为 `config.hidden_size` , _即_ $d_h$。
简而言之,全局自注意力层将 $\mathbf{X}$ 投影到查询矩阵、键矩阵和值矩阵: $\mathbf{Q}$、$\mathbf{K}$、$\mathbf{V}$ 并使用 _softmax_ 计算最终输出 $\mathbf{Z}$,如下所示:
$\mathbf{Z} = \text{SelfAttn}(\mathbf{X}) = \text{softmax}(\mathbf{Q}\mathbf{K}^T) \mathbf{V}$,其中 $\mathbf{Z}$ 的维度为 $d_h \times n$ (为简单起见,此处省略了键归一化因子和输出映射权重 $\mathbf{W}^{O}$)。有关完整 transformer 操作的更多详细信息,请参阅 [《图解 transformer》](https://jalammar.github.io/illustrated-transformer/) 一文。
下图给出了 $n=16,d_h=3$ 情况下的操作:

请注意,本文所有示意图都假设 `batch_size` 和 `config.num_attention_heads` 为 1。为了便于稍后更好地解释 _LSH 自注意力_ ,我们还在图中标记出了一些向量, _如_ $\mathbf{x_3}$ 及其相应的输出向量 $\mathbf{z_3}$。图中的逻辑可以轻易扩展至多头自注意力 ( `config.num_attention_heads` > 1)。如需了解多头注意力,建议读者参阅 [《图解 transformer》](https://jalammar.github.io/illustrated-transformer/)。
敲个重点,对于每个输出向量 $\mathbf{z}_{i}$,整个输入序列 $\mathbf{X}$ 都需要参与其计算。内积张量 $\mathbf{Q}\mathbf{K}^T$ 的内存复杂度为 $\mathcal{O}(n^2)$,这事实上使得 transformer 模型的瓶颈在内存。
这也是为什么 `bert-base-cased` 的 `config.max_position_embedding_size` 只有 512 的原因。
### 局部自注意力
**局部自注意力** 是缓解 $\mathcal{O}(n^2)$ 内存瓶颈的一个显然的解决方案,它使我们能够以更低的计算成本建模更长的序列。在局部自注意力中,输入 $\mathbf{X} = \mathbf{X}_{1:n} = \mathbf{x}_{1}, \ldots, \mathbf{x}_{n}$ 被切成 $n_{c}$ 个块: $\mathbf{X} = \left[\mathbf{X}_{1:l_{c}}, \ldots, \mathbf{X} _{(n_{c} - 1) * l_{c} : n_{c} * l_{c}}\right]$,每块长度为 `config.local_chunk_length` , _即_ $l_{c}$,随后,对每个块分别应用全局自注意力。
继续以 $n=16,d_h=3$ 为例:

假设 $l_{c} = 4,n_{c} = 4$,此时,我们将分块注意力图示如下:

可以看出,我们对每个块分别执行了注意力操作 $\mathbf{X} _{1:4},\mathbf{X}_ {5:8},\mathbf{X} _{9:12 },\mathbf{X}_ {13:16}$。
该架构的一个明显的缺点是: 一些输入向量无法访问其直接上下文, _例如_ ,我们的例子中的 $\mathbf{x} _9$ 无法访问 $\mathbf{x}_ {8}$,反之亦然。这是有问题的,因为这些词元无法在学习其向量表征时将其直接上下文的纳入考量。
一个简单的补救措施是用 `config.local_num_chunks_before` ( _即_ $n_{p}$) 以及 `config.local_num_chunks_after` ( _即_ $n_{a}$) 来扩充每个块,以便每个输入向量至少可以访问 $n_{p}$ 个先前输入块及 $n_{a}$ 个后续输入块。我们可将其理解为重叠分块,其中 $n_{p}$ 和 $n_{a}$ 定义了每个块与其先前块和后续块的重叠量。我们将这种扩展的局部自注意力表示如下:
$$\mathbf{Z}^{\text{loc}} = \left[\mathbf{Z}_{0:l_{c}}^{\text{loc}}, \ldots, \mathbf{Z}_{(n_{c} - 1) * l_{c} + 1 : n_{c} * l_{c}}^{\text{loc}}\right],$$
其中
$$\mathbf{Z}_{l_{c} * (i - 1) + 1 : l_{c} * i}^{\text{loc}} = \text{SelfAttn}(\mathbf{X}_ {l_{c} * (i - 1 - n_{p}) + 1: l_{c} * (i + n_{a})})\left[n_{p} * l_{c}: -n_{ a} * l_{c}\right], \forall i \in \{1, \ldots, n_{c} \}$$
好吧,这个公式看起来有点复杂,我们稍微分析一下。在 Reformer 的自注意力层中,$n_{a}$ 通常设为 0,$n_{p}$ 设为 1,我们据此重写 $i = 1$ 时的公式:
$$\mathbf{Z}_{1:l_{c}}^{\text{loc}} = \text{SelfAttn}(\mathbf{X}_{-l_{c} + 1: l_{c}})\left[l_{c}:\right]$$
我们注意到这里有一个循环关系,因此第一个块也可以关注最后一个块。我们再次图解一下这种增强的局部关注算法。我们先按块找到其对应的窗口,并在其上应用自注意力,然后仅保留中心输出段作为本块的输出。

最后,将相应的输出串接到 $\mathbf{Z}^{\text{loc}}$ 中,如下所示:

请注意,在实现局部自注意力时,为了计算效率,我们并不会像图中一样先计算全部输出并随后 _丢弃_ 一部分。图中红叉所示的地方仅用于说明,实际并不会产生计算行为。
这里需要注意的是,扩展每个分块自注意力函数的输入向量可以使得 _每个_ 输出向量 $\mathbf{z}_{i}$ 都能够学到更好的向量表征。以图中的向量为例,每个输出向量 $\mathbf{z}_{5}^{\text{loc}},\mathbf{z}_{6}^{\text{loc}},\mathbf{z}_{7}^{\text{loc}},\mathbf{z}_{8}^{\text{loc}}$ 都可以将 $\mathbf{X}_{1:8}$ 的所有输入向量纳入考量以学到更好的表征。
内存消耗上的降低也是显而易见的: $\mathcal{O}(n^2)$ 的内存复杂度被分解到段,因此总内存复杂度减少为 $\mathcal{O}(n_{c} * l_{c}^2) = \mathcal{O}(n * l_{c})$。
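下面用一个极简的 numpy 草图来示意这种“每个块关注自身及其前一个块(含循环回绕)”的计算方式。这里假设 $\mathbf{Q}=\mathbf{K}=\mathbf{V}=\mathbf{X}$、单注意力头,与实际实现相比做了大量简化:
```python
import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def local_self_attention(X, chunk_len, n_before=1):
    # 将序列切成若干块,每个块只与自身及其前 n_before 个块(循环回绕)做注意力
    n, d = X.shape
    chunks = X.reshape(n // chunk_len, chunk_len, d)
    outs = []
    for i in range(len(chunks)):
        ctx = np.concatenate([chunks[(i - j) % len(chunks)] for j in range(n_before, -1, -1)])
        scores = softmax(chunks[i] @ ctx.T / np.sqrt(d))
        outs.append(scores @ ctx)                      # 仅保留本块对应的输出
    return np.concatenate(outs)

X = np.random.randn(16, 3)
print(local_self_attention(X, chunk_len=4).shape)      # (16, 3)
```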
这种增强的局部自注意力比普通的局部自注意力架构更好,但仍然存在一个主要缺陷,因为每个输入向量只能关注预定义大小的局部上下文。对于不需要 transformer 模型学习输入向量之间的远程依赖关系的 NLP 任务 ( _例如_ 语音识别、命名实体识别以及短句子的因果语言建模) 而言,可能不是一个大问题。但还有许多 NLP 任务需要模型学习远程依赖关系,因此局部自注意力在这些任务下可能会导致显著的性能下降, _如_ :
- _问答_ : 模型必须学习问题词元和相关答案词元之间的关系,这些词元很可能并不相邻;
- _多项选择_ : 模型必须将多个答案词元段相互比较,这些答案词元段通常隔得比较远;
- _摘要_ : 模型必须学习长序列的上下文词元和较短的摘要词元序列之间的关系,而上下文和摘要之间的相关关系很可能无法通过局部自注意力来捕获。
- ……
局部自注意力本身很可能不足以让 transformer 模型学习输入向量 (词元) 彼此之间的相关关系。
因此,Reformer 额外采用了一个近似全局自注意力的高效自注意力层,称为 _LSH 自注意力_ 。
### LSH 自注意力
鉴于我们已经了解了局部自注意力的工作原理,下面我们继续尝试一下可能是 Reformer 中最具创新性的算法改进: **LSH 自注意力**。
LSH 自注意力的设计目标是在效果上接近全局自注意力,而在速度与资源消耗上与局部自注意力一样高效。
LSH 自注意力因依赖于 Andoni 等人于 2015 年提出的 [LSH 算法](https://arxiv.org/abs/1509.02897) 而得名。
LSH 自注意力源于以下洞见: 如果 $n$ 很大,则对每个查询向量而言,其对应的输出向量 $\mathbf{z}_{i}$ 作为所有 $\mathbf{V}$ 的线性组合,其中应只有极少数几个 $\mathbf{v}_{i}$ 的权重比其他大得多。也就是说对 $\mathbf{Q}\mathbf{K}^T$ 注意力点积作 softmax 产生的权重矩阵的每一行应仅有极少数的值远大于 0。
我们展开讲讲: 设 $\mathbf{k}_{i} \in \mathbf{K} = \left[\mathbf{k}_1, \ldots, \mathbf{k}_n \right]^T$ 和 $\mathbf{q}_{i} \in \mathbf{Q} = \left[\mathbf{q}_1, \ldots, \mathbf{q}_n\right]^T$ 分别为键向量和查询向量。对于每个 $\mathbf{q}_{i}$,可以仅用那些与 $\mathbf{q}_{i}$ 具有高余弦相似度的 $\mathbf{k}_{j}$ 的键向量来近似计算 $\text{softmax}(\mathbf{q}_{i}^T \mathbf{K}^T)$ 。这是因为 softmax 函数对较大输入值的输出会呈指数级增加。听起来没毛病,那么下一个问题就变成了如何高效地找到每个 $\mathbf{q}_{i}$ 的高余弦相似度键向量集合。
首先,Reformer 的作者注意到共享查询投影和键投影: $\mathbf{Q} = \mathbf{K}$ 并不会影响 transformer 模型 ${}^1$。现在,不必为每个查询向量 $q_i$ 找到其高余弦相似度的键向量,而只需计算查询向量彼此之间的余弦相似度。这一简化很重要,因为查询向量之间的余弦相似度满足传递性: 如果 $\mathbf{q}_{i}$ 与 $\mathbf{q}_{j}$ 和 $\mathbf{q}_{k}$ 都具有较高的余弦相似度,则 $\mathbf{q}_{j}$ 与 $\mathbf{q}_{k}$ 也具有较高的余弦相似度。因此,可以将查询向量聚类至不同的桶中,使得同一桶中的所有查询向量彼此的余弦相似度较高。我们将 $C_{m}$ 定义为第 _m_ 组位置索引,其中装的是属于同一个桶的所有查询向量: $C_{m} = { i | \mathbf{q}_{i} \in \text{第 m 簇}}$,同时我们定义桶的数量 `config.num_buckets` , _即_ $n_{b}$。
对每个索引 $C_{m}$ 对应的查询向量桶内的查询向量 $\mathbf{q}_{i}$,我们可以用 softmax 函数 $\text{softmax}(\mathbf{Q}_{i \in C_{m}} \mathbf{Q}^T_{i \in C_{m}})$ 通过共享查询和键投影来近似全局自注意力的 softmax 函数 $\text{softmax}(\mathbf{q}_{i}^T \mathbf{Q}^T)$。
其次,作者利用 **LSH** 算法将查询向量聚类到预定义的 $n_{b}$ 个桶 中。这里,LSH 算法是理想之选,因为它非常高效,且可用于近似基于余弦相似度的最近邻算法。对 LSH 进行解释超出了本文的范围,我们只要记住,对向量 $\mathbf{q}_{i}$,LSH 算法将其索引至 $n_{b}$ 个预定义桶中的某个桶, _即_ $\text{LSH}(\mathbf{q}_{i}) = m$ 其中 $i \in {1, \ldots, n}$,$m \in {1, \ldots, n_{b}}$。
还用前面的例子,我们有:

接着,可以注意到,将所有查询向量聚类至 $n_{b}$ 个桶中后,我们可以将输入向量 $\mathbf{x}_1, \ldots, \mathbf{x}_n$ 按其对应的索引 $C_{m}$ 进行重排 ${}^2$,以便共享查询 - 键自注意力可以像局部注意力一样分段应用。
我们用例子再解释一下,假设在 `config.num_buckets=4` , ` config.lsh_chunk_length=4` 时重排输入向量 $\mathbf{X} = \mathbf{x}_1, …, \mathbf{x}_{16}$。上图已将每个查询向量 $\mathbf{q}_1, \ldots, \mathbf{q}_{16}$ 分配给簇 $\mathcal{C}_{1}、\mathcal{C}_{2}、\mathcal{C}_{3}、\mathcal{C}_{4}$ 中的某一个。现在,对其对应的输入向量 $\mathbf{x}_1, \ldots, \mathbf{x}_{16}$ 进行重排,并将重排后的输入记为 $\mathbf{X'}$:

对每个输入向量,仅需在簇内进行自注意力计算即可,因此每个输入向量对应的输出向量可计算如下: $\mathbf{Z}^{\text{LSH}}_{i \in \mathcal{C}_m} = \text{SelfAttn}_{\mathbf{Q}=\mathbf{K}}(\mathbf{X}_{i \in \mathcal{C}_m})$。
我们再次图解一下该过程:

可以看出,自注意力函数的运算矩阵大小各不相同,这种情况比较麻烦,因为 GPU 和 TPU 无法高效并行处理不同尺寸的矩阵运算。
为了进一步解决高效计算的问题,可以借鉴局部注意力的方法,对重排后的输入进行分块,以使每个块的大小均为 `config.lsh_chunk_length` 。通过对重排后的输入进行分块,一个桶可能会被分成两个不同的块。为了解决这个问题,与局部自注意力一样,在 LSH 自注意力中,每个块除了自身之外还关注其前一个块 `config.lsh_num_chunks_before=1` ( `config.lsh_num_chunks_after` 通常设置为 0)。这样,我们就可以大概率确保桶中的所有向量相互关注 ${}^3$。
总而言之,对于所有块 $k \in {1, \ldots, n_{c}}$,LSH 自注意力可以如下表示:
$$ \mathbf{Z'}_{l_{c} * k + 1 : l_{c} * (k + 1)}^{\text{LSH}} = \text{SelfAttn}_{\mathbf{Q} = \mathbf{K}}(\mathbf{X'}_{l_{c} * (k - 1) + 1 : l_{c} * (k + 1)})\left[l_{c}:\right] $$
其中 $\mathbf{X'}$ 和 $\mathbf{Z'}$ 是按照 LSH 分桶进行重排后的输入和输出向量。公式有点复杂,我们还是画个图以帮助大家理解。
这里,我们对上图中的重排向量 $\mathbf{X'}$ 进行分块,并分别计算每块的共享查询 - 键自注意力。

最后,将输出 $\mathbf{Z'}^{\text{LSH}}$ 重排回原顺序。

这里还要提到的一个重要特征是,可以通过并行运行 LSH 自注意力 `config.num_hashes` (即 $n_{h}$) 次来提高 LSH 自注意力的准确性,其中每次使用不同的随机 LSH 哈希。通过设置 `config.num_hashes > 1` ,对于每个 $i$,会计算多个输出向量 $\mathbf{z}^{\text{LSH}, 1}_{i}, \ldots , \mathbf{z}^{\text{LSH}, n_{h}}_{i}$。随后,可以对它们进行加权求和: $\mathbf{z}^{\text{LSH}}_{i} = \sum_k^{n_{h}} \mathbf{Z}^{\text{LSH}, k}_{i} * \text{weight}^k_i$,这里 $\text{weight}^k_i$ 表示第 $k$ 轮哈希的输出向量 $\mathbf{z}^{\text{LSH}, k}_{i}$ 与其他哈希轮次相比的重要度,其应与其对应输出的 softmax 归一化系数呈指数正比关系。这一设计背后的直觉是,如果查询向量 $\mathbf{q}_{i}^{k}$ 与其对应块中的所有其他查询向量具有较高的余弦相似度,则该块的 softmax 归一化系数往往很大,因此相应的输出向量 $\mathbf{q}_{i}^{k}$ 应该能更好地近似全局注意力,因此其理应比 softmax 归一化系数较小的哈希轮次所产生的输出向量获得更高的权重。更多详细信息,请参阅 [该论文](https://arxiv.org/pdf/2001.04451.pdf) 的附录 A。在我们的例子中,多轮 LSH 自注意力示意图如下。

打完收工!至此,我们了解了 LSH 自注意力在 Reformer 中是如何工作的。
说回内存复杂度,该方法有两个可能的瓶颈点: 点积所需的内存: $\mathcal{O}(n_{h} * n_{c} * l_{c}^2) = \mathcal{O}(n * n_{h} * l_{c})$ 以及 LSH 分桶所需的内存: $\mathcal{O}(n * n_{h} * \frac{n_{b}}{2})$ 其中 $l_{c}$ 是块长度。因为对于大的 $n$ 而言,桶的数量 $\frac{n_{b}}{2}$ 的增长速度远远快于块长度 $l_{c}$,因此用户可以继续对存储桶的数量 `config.num_buckets` 进行分解,详见 [此处](https://huggingface.co/transformers/model_doc/reformer.html#lsh-self-attention)。
我们快速总结一下:
1. 我们希望利用 softmax 运算仅对极少数键向量赋予重要权重的先验知识来对全局注意力进行近似。
2. 如果键向量等于查询向量,这意味着 _对于每个_ 查询向量 $\mathbf{q}_{i}$,softmax 只需给与其余弦相似度高的其他查询向量赋予重要权重就行了。
3. 这种关系是对称的,也就是说,如果 $\mathbf{q}_{j}$ 与 $\mathbf{q}_{i}$ 相似,则 $\mathbf{q}_{j}$ 也与 $\mathbf{q}_{i}$ 相似,因此我们可以在计算自注意力之前对输入进行全局聚类。
4. 我们对输入按簇进行重排,并对重排后的输入计算局部自注意力,最后将输出重新恢复为原顺序。
---
${}^{1}$ 作者进行了一些初步实验,确认共享查询 - 键自注意力的表现与标准自注意力大体一致。
${}^{2}$ 更准确地说,对存储桶中的查询向量根据其原始顺序进行排序。举个例子, _假如_ 向量 $\mathbf{q}_1, \mathbf{q}_3, \mathbf{q}_7$ 全部散列到存储桶 2,则存储桶 2 中向量的顺序仍应是先 $\mathbf{q}_1$,后跟 $\mathbf{q}_3$ 和 $\mathbf{q}_7$。
${}^3$ 顺带说明一下,作者在查询向量 $\mathbf{q}_{i}$ 上放了一个掩码,以防止向量关注本身。因为向量与其自身的余弦相似度总是大于等于其与其他向量的余弦相似度,所以强烈不建议共享查询 - 键自注意力中的查询向量关注自身。
### 基准测试
Transformers 最近增加了基准测试相关的代码,你可参阅 [此处](https://github.com/huggingface/notebooks/blob/main/examples/benchmark.ipynb) 以获取更详细的说明。
为了展示局部 LSH 自注意力可以节省多少内存,我们在不同的 `local_attn_chunk_length` 和 `lsh_attn_chunk_length` 上对 Reformer 模型 `google/reformer-enwik8` 上进行了基准测试。你可以从 [此处](https://huggingface.co/google/reformer-enwik8) 找到更详细的有关 `google/reformer-enwik8` 模型的默认配置和用法信息。
我们先进行一些必要的导入和安装。
```
#@title Installs and Imports
# pip installs
!pip -qq install git+https://github.com/huggingface/transformers.git
!pip install -qq py3nvml
from transformers import ReformerConfig, PyTorchBenchmark, PyTorchBenchmarkArguments
```
首先,我们测试一下在 Reformer 模型上使用 _全局_ 自注意力的内存使用情况。这可以通过设置 `lsh_attn_chunk_length` = `local_attn_chunk_length` = 16386 来达成,此时,对于所有小于或等于 16386 的输入序列,模型事实上就回退成全局自注意力了。
```
config = ReformerConfig.from_pretrained("google/reformer-enwik8", lsh_attn_chunk_length=16386, local_attn_chunk_length=16386, lsh_num_chunks_before=0, local_num_chunks_before=0)
benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[2048, 4096, 8192, 16386], batch_sizes=[1], models=["Reformer"], no_speed=True, no_env_print=True)
benchmark = PyTorchBenchmark(configs=[config], args=benchmark_args)
result = benchmark.run()
```
HBox(children=(FloatProgress(value=0.0, description='Downloading', max=1279.0, style=ProgressStyle(description…
1 / 1
Doesn't fit on GPU. CUDA out of memory. Tried to allocate 2.00 GiB (GPU 0; 11.17 GiB total capacity; 8.87 GiB already allocated; 1.92 GiB free; 8.88 GiB reserved in total by PyTorch)
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
Reformer 1 2048 1465
Reformer 1 4096 2757
Reformer 1 8192 7893
Reformer 1 16386 N/A
--------------------------------------------------------------------------------
输入序列越长,输入序列和峰值内存使用之间的平方关系 $\mathcal{O}(n^2)$ 越明显。可以看出,实际上,需要更长的输入序列才能清楚地观察到输入序列翻倍会导致峰值内存使用量增加四倍。
对使用全局注意力的 `google/reformer-enwik8` 模型而言,序列长度超过 16K 内存就溢出了。
现在,我们使用模型的默认参数以使能 _局部 LSH_ 自注意力。
```
config = ReformerConfig.from_pretrained("google/reformer-enwik8")
benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[2048, 4096, 8192, 16384, 32768, 65436], batch_sizes=[1], models=["Reformer"], no_speed=True, no_env_print=True)
benchmark = PyTorchBenchmark(configs=[config], args=benchmark_args)
result = benchmark.run()
```
1 / 1
Doesn't fit on GPU. CUDA out of memory. Tried to allocate 2.00 GiB (GPU 0; 11.17 GiB total capacity; 7.85 GiB already allocated; 1.74 GiB free; 9.06 GiB reserved in total by PyTorch)
Doesn't fit on GPU. CUDA out of memory. Tried to allocate 4.00 GiB (GPU 0; 11.17 GiB total capacity; 6.56 GiB already allocated; 3.99 GiB free; 6.81 GiB reserved in total by PyTorch)
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
Reformer 1 2048 1785
Reformer 1 4096 2621
Reformer 1 8192 4281
Reformer 1 16384 7607
Reformer 1 32768 N/A
Reformer 1 65436 N/A
--------------------------------------------------------------------------------
不出所料,对于较长的输入序列,使用局部 LSH 自注意力机制的内存效率更高,对于本文使用的 11GB 显存 GPU 而言,模型直到序列长度为 32K 时,内存才耗尽。
## 2. 分块前馈层
基于 transformer 的模型通常在自注意力层之后会有一个非常大的前馈层。该层可能会占用大量内存,有时甚至成为模型主要的内存瓶颈。Reformer 论文中首次引入了前馈分块技术,以用时间换取内存。
### Reformer 中的分块前馈层
在 Reformer 中, _LSH_ 自注意力层或局部自注意力层通常后面跟着一个残差连接,我们可将其定义为 _transformer 块_ 的第一部分。更多相关知识,可参阅此 [博文](https://jalammar.github.io/illustrated-transformer/)。
_Transformer 块_ 第一部分的输出,称为 _归范化自注意力_ 输出,可以记为 $\mathbf{\overline{Z}} = \mathbf{Z} + \mathbf{X}$。在 Reformer 模型中,$\mathbf{Z}$ 为 $\mathbf{Z}^{\text{LSH}}$ 或 $\mathbf{Z}^\text{loc}$。
在我们的例子中,输入 $\mathbf{x}_1, \ldots, \mathbf{x}_{16}$ 的规范化自注意力输出图示如下:

_Transformer 块_ 的第二部分通常由两个前馈层 $^{1}$ 组成,其中 $\text{Linear}_{\text{int}}(\ldots)$ 用于将 $\mathbf{\overline{Z}}$ 映射到中间输出 $\mathbf{Y}_{\text{int}}$,$\text{Linear}_{\text{out}}(\ldots)$ 用于将中间输出映射为最终输出 $\mathbf{Y}_{\text{out}}$。我们将两个前馈层定义如下:
$$\mathbf{Y}_{\text{out}} = \text{Linear}_{\text{out}}(\mathbf{Y} _\text{int}) = \text{Linear}_{\text{out}}(\text{Linear}_{\text{int}}(\mathbf{\overline{Z}}))$$
敲重点!在数学上,前馈层在位置 $i$ 处的输出 $\mathbf{y}_{\text{out}, i}$ 仅取决于该位置的输入 $\mathbf{\overline{y}}_{i}$。与自注意力层相反,每个输出 $\mathbf{y}_{\text{out}, i}$ 与其他位置的输入 $\mathbf{\overline{y}}_{j \ne i}$ 完全独立。
$\mathbf{\overline{z}}_1, \ldots, \mathbf{\overline{z}}_{16}$ 的前馈层图示如下:

从图中可以看出,所有输入向量 $\mathbf{\overline{z}}_{i}$ 均由同一前馈层并行处理。
我们再观察一下前馈层的输出维度,看看有没有啥有意思的事情。在 Reformer 中,$\text{Linear}_{\text{int}}$ 的输出维度为 `config.feed_forward_size` , _即_ $d_ {f}$; 而 $\text{Linear}_{\text{out}}$ 的输出维度为 `config.hidden_size` , _即_ $d_ {h}$。
Reformer 作者观察到 $^{2}$,在 transformer 模型中,中间维度 $d_{f}$ 通常往往比输出维度 $d_{h}$ 大许多。这意味着尺寸为 $d_{f} \times n$ 的张量 $\mathbf{\mathbf{Y}}_\text{int}$ 占据了大量的内存,甚至可能成为内存瓶颈。
为了更好地感受维度的差异,我们将本文例子中的矩阵 $\mathbf{Y}_\text{int}$ 和 $\mathbf{Y}_\text{out}$ 图示如下:

很明显,张量 $\mathbf{Y} _\text{int}$ 比 $\mathbf{Y}_{\text{out}}$ 占用了更多的内存 (准确地说,多占 $\frac{d_{f}}{d_{h}} \times n$ 字节的内存)。但是,是否有必要存储完整的中间矩阵 $\mathbf{Y}_\text{int}$ ?并非如此,因为我们关心的实际上只有输出矩阵 $\mathbf{Y}_ \text{out}$。为了以速度换内存,我们可以对线性层计算进行分块,一次只处理一个块。定义 `config.chunk_size_feed_forward` 为 $c_{f}$,则分块线性层定义为 $\mathbf{Y}_{\text{out}} = \left[\mathbf{Y}_{\text{out}, 1: c_{f}}, \ldots, \mathbf{Y}_{\text{out}, (n - c_{f}): n}\right]$ 即 $\mathbf{Y}_{\text{out}, (c_{f} * i):(i * c_{f} + i)} = \text{Linear}_{\text{out}}( \text{Linear}_{\text{int}}(\mathbf{\overline{Z}}_{(c_{f} * i):(i * c_{f} + i)}))$。这么做意味着我们可以增量计算输出最后再串接在一起,这样可以避免将整个中间张量 $\mathbf{Y}_{\text{int}}$ 存储在内存中。
假设 $c_{f}=1$,我们把增量计算 $i=9$ 的过程图示如下:

当块大小为 1 时,必须完整存储在内存中的唯一张量是大小为 $16 \times d_{h}$ 的输入张量 $\mathbf{\overline{Z}}$,其中 $d_{h}$ 为 `config.hidden_size` 。而中间张量只需要存储大小为 $d_{f}$ 的 $\mathbf{y}_{\text{int}, i}$ 就可以了 $^{3}$。
最后,重要的是要记住, _分块线性层_ 与传统的完整线性层相比,其输出在数学上是等效的,因此可以应用于所有 transformer 线性层。因此,在某些场景下,可以考虑使用 `config.chunk_size_feed_forward` 在内存和速度之间进行更好的权衡。
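下面用一个极简的 PyTorch 草图验证这种数学等效性(沿用上文的符号,取 $c_{f} = 1$、$n = 16$,仅作示意):
```python
import torch
import torch.nn as nn

def chunked_ffn(z, linear_int, linear_out, chunk_size):
    # 沿序列维度分块,逐块经过两个线性层后再拼接,避免一次性存储完整的中间张量 Y_int
    outs = [linear_out(linear_int(chunk)) for chunk in z.split(chunk_size, dim=0)]
    return torch.cat(outs, dim=0)

d_h, d_f, n = 4, 16, 16
linear_int, linear_out = nn.Linear(d_h, d_f), nn.Linear(d_f, d_h)
z = torch.randn(n, d_h)
full = linear_out(linear_int(z))                         # 一次性计算
chunked = chunked_ffn(z, linear_int, linear_out, 1)      # 逐位置分块计算
assert torch.allclose(full, chunked, atol=1e-5)          # 两者在数值上一致
```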
---
${}^1$ 为了简单起见,我们省略了前馈层之前的层归一化操作。
${}^2$ 以 `bert-base-uncased` 为例,其中间维度 $d_{f}$ 是 3072,为输出维度 $d_{h}$ 的 4 倍。
${}^3$ 提醒一下,为清晰说明起见,本文假设输出 `config.num_attention_heads` 为 1,因此假设自注意力层的输出大小为 `config.hidden_size` 。
读者也可以在 🤗Transformers 的 [相应文档](https://huggingface.co/transformers/glossary.html#feed-forward-chunking) 中找到有关分块线性/前馈层的更多信息。
### 基准测试
我们测试一下使用分块前馈层可以节省多少内存。
```
#@title Installs and Imports
# pip installs
!pip -qq install git+https://github.com/huggingface/transformers.git
!pip install -qq py3nvml
from transformers import ReformerConfig, PyTorchBenchmark, PyTorchBenchmarkArguments
```
Building wheel for transformers (setup.py) ... [?25l[?25hdone
首先,我们将没有分块前馈层的默认 `google/reformer-enwik8` 模型与有分块前馈层的模型进行比较。
```
config_no_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8") # no chunk
config_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8", chunk_size_feed_forward=1) # feed forward chunk
benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[1024, 2048, 4096], batch_sizes=[8], models=["Reformer-No-Chunk", "Reformer-Chunk"], no_speed=True, no_env_print=True)
benchmark = PyTorchBenchmark(configs=[config_no_chunk, config_chunk], args=benchmark_args)
result = benchmark.run()
```
1 / 2
Doesn't fit on GPU. CUDA out of memory. Tried to allocate 2.00 GiB (GPU 0; 11.17 GiB total capacity; 7.85 GiB already allocated; 1.74 GiB free; 9.06 GiB reserved in total by PyTorch)
2 / 2
Doesn't fit on GPU. CUDA out of memory. Tried to allocate 2.00 GiB (GPU 0; 11.17 GiB total capacity; 7.85 GiB already allocated; 1.24 GiB free; 9.56 GiB reserved in total by PyTorch)
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
Reformer-No-Chunk 8 1024 4281
Reformer-No-Chunk 8 2048 7607
Reformer-No-Chunk 8 4096 N/A
Reformer-Chunk 8 1024 4309
Reformer-Chunk 8 2048 7669
Reformer-Chunk 8 4096 N/A
--------------------------------------------------------------------------------
有趣的是,分块前馈层似乎在这里根本没有帮助。原因是 `config.feed_forward_size` 不够大,所以效果不明显。仅当序列长度较长 (4096) 时,才能看到内存使用量略有下降。
我们再看看如果将前馈层的大小增加 4 倍,并将注意力头的数量同时减少 4 倍,从而使前馈层成为内存瓶颈,此时峰值内存情形如何。
```
config_no_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8", chunk_size_feed_forward=0, num_attention_heads=2, feed_forward_size=16384) # no chunk
config_chunk = ReformerConfig.from_pretrained("google/reformer-enwik8", chunk_size_feed_forward=1, num_attention_heads=2, feed_forward_size=16384) # feed forward chunk
benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[1024, 2048, 4096], batch_sizes=[8], models=["Reformer-No-Chunk", "Reformer-Chunk"], no_speed=True, no_env_print=True)
benchmark = PyTorchBenchmark(configs=[config_no_chunk, config_chunk], args=benchmark_args)
result = benchmark.run()
```
1 / 2
2 / 2
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
Reformer-No-Chunk 8 1024 3743
Reformer-No-Chunk 8 2048 5539
Reformer-No-Chunk 8 4096 9087
Reformer-Chunk 8 1024 2973
Reformer-Chunk 8 2048 3999
Reformer-Chunk 8 4096 6011
--------------------------------------------------------------------------------
现在,对于较长的输入序列,可以看到峰值内存使用量明显减少。总之,应该注意的是,分块前馈层仅对于具有很少注意力头和较大前馈层的模型才有意义。
## 3. 可逆残差层
可逆残差层由 [N. Gomez 等人](https://arxiv.org/abs/1707.04585) 首先提出并应用在 _ResNet_ 模型的训练上以减少内存消耗。从数学上讲,可逆残差层与 _真正的_ 残差层略有不同,其不需要在前向传播期间保存激活,因此可以大大减少训练的内存消耗。
### Reformer 中的可逆残差层
我们首先研究为什么模型训练比推理需要更多的内存。
在模型推理时,所需的内存差不多等于计算模型中 **单个** 最大张量所需的内存。而在训练模型时,所需的内存差不多等于所有可微张量的 **总和**。
如果读者已经理解了深度学习框架中的自动微分的工作原理,对此就比较容易理解了。多伦多大学 Roger Grosse 的这些 [幻灯片](https://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/slides/lec10.pdf) 对大家理解自动微分很有帮助。
简而言之,为了计算可微函数 ( _如_ 一层) 的梯度,自动微分需要函数输出的梯度以及函数的输入、输出张量。虽然梯度是可以动态计算并随后丢弃的,但函数的输入和输出张量 ( _又名_ 激活) 需要在前向传播过程中被保存下来,以供反向传播时使用。
我们具体看下 transformer 模型中的情况。Transformer 模型是由多个 transformer 层堆叠起来的。每多一个 transformer 层都会迫使模型在前向传播过程中保存更多的激活,从而增加训练所需的内存。
我们细看一下 transformer 层。Transformer 层本质上由两个残差层组成。第一个残差层是第 1) 节中解释的 _自注意力_ 机制,第二个残差层是第 2) 节中解释的 _线性层_ (或前馈层)。
使用与之前相同的符号,transformer 层的输入 _即_ $\mathbf{X}$ 首先被归一化 $^{1}$,然后经过自注意力层获得输出 $\mathbf{Z} = \text{SelfAttn}(\text{LayerNorm}(\mathbf{X}))$。为方便讨论,我们将这两层缩写为 $G$,即 $\mathbf{Z} = G(\mathbf{X})$。
接下来,将残差 $\mathbf{Z}$ 与输入相加 $\mathbf{\overline{Z}} = \mathbf{Z} + \mathbf{X}$,得到张量输入到第二个残差层 —— 两个线性层。$\mathbf{\overline{Z}}$ 经过第二个归一化层处理后,再经过两个线性层,得到 $\mathbf{Y} = \text{Linear}(\text{LayerNorm}(\mathbf{Z} + \mathbf{X}))$。我们将第二个归一化层和两个线性层缩写为 $F$ ,得到 $\mathbf{Y} = F(\mathbf{\overline{Z}})$。最后,将残差 $\mathbf{Y}$ 加到 $\mathbf{\overline{Z}}$ 上得到 transformer 层的输出 $\mathbf{\overline{Y}} = \mathbf{Y} + \mathbf{\overline{Z}}$。
我们仍以 $\mathbf{x}_1, \ldots, \mathbf{x}_{16}$ 为例对完整的 transformer 层进行图解。

_比如_ ,要计算自注意力块 $G$ 的梯度,必须事先知道三个张量: 梯度 $\partial \mathbf{Z}$、输出 $\mathbf{Z}$ 以及输入 $\mathbf{X}$。虽然 $\partial \mathbf{Z}$ 可以即时计算并随后丢弃,但 $\mathbf{Z}$ 和 $\mathbf{X}$ 必须在前向传播期间计算并保存下来,因为在反向传播期间比较难轻松地即时重新计算它们。因此,在前向传播过程中,大张量输出 (如查询 - 键点积矩阵 $\mathbf{Q}\mathbf{K}^T$ 或线性层的中间输出 $\mathbf{Y}^{\text{int}}$) 必须保存在内存中 $^{2}$。
此时,可逆残差层就有用了。它的想法相对简单: 残差块的设计方式使得不必保存函数的输入和输出张量,而在反向传播期间就轻松地对二者进行重新计算,这样的话在前向传播期间就无需将这些张量保存在内存中了。
这是通过两个输入流 $\mathbf{X}^{(1)}、\mathbf{X}^{(2)}$ 及两个输出流 $\mathbf{\overline {Y}}^{(1)}、\mathbf{\overline{Y}}^{(2)}$ 来实现的。第一个残差 $\mathbf{Z}$ 由第一个输出流 $\mathbf{Z} = G(\mathbf{X}^{(1)})$ 算得,然后其加到第二个输入流的输入上,即 $\mathbf{\overline{Z}} = \mathbf{Z} + \mathbf{X}^{(2)}$。类似地,再将残差 $\mathbf{Y} = F(\mathbf{\overline{Z}})$ 与第一个输入流相加。最终,两个输出流即为 $\mathbf{Y}^{(1)} = \mathbf{Y} + \mathbf{X}^{(1)}$、$\mathbf{Y}^{(2)} = \mathbf{ X}^{(2)} + \mathbf{Z} = \mathbf{\overline{Z}}$。
以 $\mathbf{x}_1, \ldots, \mathbf{x}_{16}$ 为例来图示可逆 transformer 层,如下:

可以看出,输出 $\mathbf{\overline{Y}}^{(1)}、\mathbf{\overline{Y}}^{(2)}$ 的计算方式与不可逆层 $\mathbf{\overline{Y}}$ 的计算方式非常相似,但在数学上又不同。Reformer 的作者在一些初步实验中观察到,可逆 transformer 模型的性能与标准 transformer 模型的性能相当。与标准 transformer 层的一个明显区别是有两个输入流和输出流 $^{3}$,这一开始反而稍微增加了前向传播所需的内存。但即使如此,我们还是强调双流架构至关重要,因为其在前向传播过程中无需保存任何激活。我们解释一下: 对于反向传播,可逆 transformer 层必须计算梯度 $\partial G$ 和 $\partial F$。除了可即时计算的梯度 $\partial \mathbf{Y}$ 和 $\partial \mathbf{Z}$ 之外,为了计算 $\partial F$ 必须已知张量值 $\mathbf{Y}$、$\mathbf{\overline{Z}}$,为了计算 $\partial G$ 必须已知 $\mathbf{Z}$ 和 $\mathbf{X}^{(1)}$。
假设我们知道 $\mathbf{\overline{Y}}^{(1)},\mathbf{\overline{Y}}^{(2)}$,则从图中可以很容易看出,我们可以如下计算出 $\mathbf{X}^{(1)},\mathbf{X}^{(2)}$。首先,$\mathbf{X}^{(1)} = \mathbf{\overline{Y}}^{(1)} - F(\mathbf{\overline{Y}}^{(2)})$,$\mathbf{X}^{(1)}$ 就计算出来了!然后,$\mathbf{X}^{(2)}$ 可以通过 $\mathbf{X}^{(2)} = \mathbf{\overline{Y}}^{(2)} - G(\mathbf{X}^{(1)})$ 算出。之后,$\mathbf{Z}$ 和 $\mathbf{Y}$ 的计算就简单了,可以通过 $\mathbf{Y} = \mathbf{\overline{Y}}^{(1)} - \mathbf{X}^{(1)}$ 和 $\mathbf{Z} = \mathbf{\overline{Y}}^{(2)} - \mathbf{X}^{(2)}$ 算出。总结一下,仅需在前向传播期间存储 **最后一个** 可逆 transformer 层的输出 $\mathbf{\overline{Y}}^{(1)},\mathbf{\overline{Y}}^{(2)}$,所有其他层的激活就可以通过在反向传播期间使用 $G$ 和 $F$ 以及 $\mathbf{X}^{(1)}$ 和 $\mathbf{X}^{(2)}$ 推导而得。在反向传播期间,每个可逆 transformer 层用两次额外前向计算 $G$ 和 $F$ 的开销,换取前向传播时不必保存任何激活。好买卖!
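下面用一个极简的 PyTorch 草图验证上述“由输出重建输入”的过程。这里用两个线性层分别代替 $G$(自注意力部分)和 $F$(前馈部分),仅作示意:
```python
import torch
import torch.nn as nn

d = 4
G = nn.Linear(d, d)   # 代替自注意力子层,仅作示意
F = nn.Linear(d, d)   # 代替前馈子层,仅作示意

def rev_forward(x1, x2):
    z = G(x1)
    y2 = x2 + z                 # 即 Z̄
    y1 = x1 + F(y2)
    return y1, y2

def recover_inputs(y1, y2):
    # 反向传播时由输出重建输入,因此前向时无需保存激活
    x1 = y1 - F(y2)
    x2 = y2 - G(x1)
    return x1, x2

x1, x2 = torch.randn(2, d), torch.randn(2, d)
y1, y2 = rev_forward(x1, x2)
rx1, rx2 = recover_inputs(y1, y2)
assert torch.allclose(rx1, x1, atol=1e-5) and torch.allclose(rx2, x2, atol=1e-5)
```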
**注意**: 最近,主要的深度学习框架都支持了梯度检查点技术,以允许仅保存某些激活并在反向传播期间重计算尺寸较大的激活 (Tensoflow 代码见 [此处](https://www.tensorflow.org/api_docs/python/tf/recompute_grad),PyTorch 代码见 [此处](https://pytorch.org/docs/stable/checkpoint.html))。对于标准可逆层,这仍然意味着必须为每个 transformer 层保存至少一个激活,但通过定义哪些激活可以动态重新计算,能够节省大量内存。
---
$^{1}$ 在前两节中,我们省略了自注意力层和线性层之前的层归一化操作。读者应该知道 $\mathbf{X}$ 和 $\mathbf{\overline{Z}}$ 在输入自注意力层和线性层之前都分别经过层归一化处理。
$^{2}$ 在原始自注意力中,$\mathbf{Q}\mathbf{K}$ 的维度为 $n \times n$; 而在 _LSH 自注意力_ 或 _局部自注意力_ 层的维度为 $n \times l_{c} \times n_{h}$ 或 $n \times l_{c}$ 其中 $l_{c}$ 为块长度,$n_{h}$ 为哈希数。
$^{3}$ 第一个可逆 transformer 层的 $\mathbf{X}^{(2)}$ 等于 $\mathbf{X}^{(1)}$。
### 基准测试
为了测量可逆残差层的效果,我们将增加模型层数的同时比较 BERT 和 Reformer 的内存消耗。
```
#@title Installs and Imports
# pip installs
!pip -qq install git+https://github.com/huggingface/transformers.git
!pip install -qq py3nvml
from transformers import ReformerConfig, BertConfig, PyTorchBenchmark, PyTorchBenchmarkArguments
```
我们把标准 `bert-base-uncased` BERT 模型的层数从 4 增加到 12 ,同时测量其所需内存。
```
config_4_layers_bert = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=4)
config_8_layers_bert = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=8)
config_12_layers_bert = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=12)
benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[512], batch_sizes=[8], models=["Bert-4-Layers", "Bert-8-Layers", "Bert-12-Layers"], training=True, no_inference=True, no_speed=True, no_env_print=True)
benchmark = PyTorchBenchmark(configs=[config_4_layers_bert, config_8_layers_bert, config_12_layers_bert], args=benchmark_args)
result = benchmark.run()
```
HBox(children=(FloatProgress(value=0.0, description='Downloading', max=433.0, style=ProgressStyle(description_…
1 / 3
2 / 3
3 / 3
==================== TRAIN - MEMORY - RESULTS ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
Bert-4-Layers 8 512 4103
Bert-8-Layers 8 512 5759
Bert-12-Layers 8 512 7415
--------------------------------------------------------------------------------
可以看出,BERT 层数每增加 1,其所需内存就会有超 400MB 的线性增长。
```
config_4_layers_reformer = ReformerConfig.from_pretrained("google/reformer-enwik8", num_hidden_layers=4, num_hashes=1)
config_8_layers_reformer = ReformerConfig.from_pretrained("google/reformer-enwik8", num_hidden_layers=8, num_hashes=1)
config_12_layers_reformer = ReformerConfig.from_pretrained("google/reformer-enwik8", num_hidden_layers=12, num_hashes=1)
benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[512], batch_sizes=[8], models=["Reformer-4-Layers", "Reformer-8-Layers", "Reformer-12-Layers"], training=True, no_inference=True, no_speed=True, no_env_print=True)
benchmark = PyTorchBenchmark(configs=[config_4_layers_reformer, config_8_layers_reformer, config_12_layers_reformer], args=benchmark_args)
result = benchmark.run()
```
1 / 3
2 / 3
3 / 3
==================== TRAIN - MEMORY - RESULTS ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
Reformer-4-Layers 8 512 4607
Reformer-8-Layers 8 512 4987
Reformer-12-Layers 8 512 5367
--------------------------------------------------------------------------------
另一方面,对于 Reformer 而言,每增加一层所带来的内存增量会显著减少,平均不到 100MB。因此 12 层的 `reformer-enwik8` 模型比 12 层的 `bert-base-uncased` 模型的内存需求更少。
## 4. 轴向位置编码
Reformer 使得处理超长输入序列成为可能。然而,对于如此长的输入序列,仅存储标准位置编码权重矩阵就需要超过 1GB 内存。为了避免如此大的位置编码矩阵,官方 Reformer 代码引入了 _轴向位置编码_ 。
**重要:** _官方论文中没有解释轴向位置编码,但通过阅读代码以及与作者讨论我们很好地理解了它。_
### Reformer 中的轴向位置编码
Transformer 需要位置编码来对输入序列中的单词顺序进行编码,因为自注意力层 _没有顺序的概念_ 。位置编码通常由一个简单的查找矩阵 $\mathbf{E} = \left[\mathbf{e}_1, \ldots, \mathbf{e}_{n_\text{max}}\right]$ 来定义,然后将位置编码向量 $\mathbf{e}_{i}$ 简单地加到 _第 i 个_ 输入向量上,即 $\mathbf{x}_{i} + \mathbf{e}_{i}$,以便模型可以区分输入向量 ( _即_ 词元) 位于位置 $i$ 还是位置$j$。对于每个输入位置,模型需要能够查找到相应的位置编码向量,因此 $\mathbf{E}$ 的维度由模型可以处理的最大输入序列长度 `config.max_position_embeddings` ( _即_ $n_\text{max}$) 以及输入向量的维度 `config.hidden_size` ( _即_ $d_{h}$) 共同决定。
假设 $d_{h}=4$,$n_\text{max}=49$,其位置编码矩阵如下图所示:

此处,我们仅展示位置编码 $\mathbf{e}_{1}$、$\mathbf{e}_{2}$ 及 $\mathbf{e}_{49}$,其维度 ( _即_ 高度) 为 4。
想象一下,我们想要在长度最长为 0.5M 个词元,输入向量维度 `config.hidden_size` 为 1024 的序列上训练 Reformer 模型 (请参阅 [此笔记本](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb))。其对应的位置嵌入的参数量为 $0.5M \times 1024 \sim 512M$,大小为 2GB。
在将模型加载到内存中或将其保存在硬盘上时,所需要的内存是很大且很没必要的。
Reformer 作者通过将 `config.hidden_size` 维度一分为二,并巧妙地对 $n_\text{max}$ 维进行分解,从而成功地大幅缩小了位置编码的大小。在 transformers 中,用户可以将 `config.axial_pos_shape` 设置为一个含有两个值的列表: $n_\text{max}^1$、$n_\text{max}^2$,其中 $n_\text{max}^1 \times n_\text{max}^2 = n_\text{max}$,从而对 $n_\text{max}$ 维度进行分解。同时,用户可以把 `config.axial_pos_embds_dim` 设置为一个含有两个值 $d_{h}^1$ 和 $d_{h}^2$ 的列表,其中 $d_{h}^1 + d_{h}^2 = d_{h}$,从而决定隐藏维度应该如何切割。下面用图示来直观解释一下。
大家可以将对 $n_{\text{max}}$ 的分解视为将其维度折叠到第三个轴,下图所示为 `config.axial_pos_shape = [7, 7]` 分解:

三个直立矩形棱柱分别对应于编码向量 $\mathbf{e}_{1}, \mathbf{e}_{2}, \mathbf{e}_{49}$,我们可以看到 49 个编码向量被分为 7 行,每行 7 个向量。现在的想法是仅使用 7 个编码向量中的一行,并将这些向量扩展到其他 6 行。本质上是想让七行重用一行的值,但是又不能让不同位置的编码向量的值相同,所以要将每个维度 ( _或称_ 高度) 为 `config.hidden_size=4` 的向量切割成两个部分: 大小为 $1$ 的低区编码向量 $\mathbf{e}_\text{down}$ 以及大小为 $3$ 的高区编码向量 $\mathbf{e}_\text{up}$,这样低区就可以沿行扩展而高区可以沿列扩展。为了讲清楚,我们还是画个图。

可以看到,我们已将嵌入向量切为 $\mathbf{e}_\text{down}$ ( _蓝色_ ) 和 $\mathbf{e}_\text{up}$ ( _黄色_ ) 两个部分。现在对 _子_ 向量 $\mathbf{E} _\text{down} = \left[\mathbf{e}_ {\text{down},1}, \ldots, \mathbf{e} _{\text{down},49}\right]$ 仅保留第一行的 7 个子向量, _即_ 图中宽度,并将其沿列 ( _又名_ 深度) 扩展。相反,对 _子_ 向量 $\mathbf{E}_\text{up} = \left[\mathbf{e}_{\text{up},1}, \ldots, \mathbf{e }_{\text{up},49}\right]$ 仅保留第一列的 $7$ 个子向量并沿行扩展。此时,得到的嵌入向量 $\mathbf{e'}_{i}$ 如下:
$$\mathbf{e'}_{i} = \left[ \left[\mathbf{e}_{\text{down, } i \% n_\text{max}^1}\right]^T, \left[\mathbf{e}_{\text{up, } \left \lfloor{\frac{i}{{n}^2_{\text{max}}}}\right \rfloor} \right]^T \right]^T $$
本例中,$n_\text{max}^1 = 7$,$n_\text{max}^2 = 7$ 。这些新编码 $\mathbf{E'} = \left[\mathbf{e'}_{1}, \ldots, \mathbf{e'}_{n_\text{max}}\right]$ 称为 **轴向位置编码**。
下图针对我们的例子对轴向位置编码进行了更详细的说明。

现在应该很清楚如何仅根据维度为 $d_{h}^1 \times n_{\text{max}^1}$ 的 $\mathbf{E}_{\text{down}}$ 及维度为 $d_{h}^2 \times n_{\text{max}}^2$ 的 $\mathbf{E}_{\text{up}}$ 计算最终位置编码向量 $\mathbf{E'}$ 了。
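下面给出一个与上文示例一致($n_\text{max}^1 = n_\text{max}^2 = 7$、$d_{h}^1 = 1$、$d_{h}^2 = 3$)的极简 numpy 草图,仅作示意:
```python
import numpy as np

n1, n2 = 7, 7        # n_max = n1 * n2 = 49
d1, d2 = 1, 3        # d_h = d1 + d2 = 4
E_down = np.random.randn(n1, d1)   # 沿“行”方向复用的低区子编码
E_up = np.random.randn(n2, d2)     # 沿“列”方向复用的高区子编码

def axial_encoding(i):
    # 位置 i 的轴向位置编码:按行取 E_down、按列取 E_up,再拼接(此处 n1 == n2)
    return np.concatenate([E_down[i % n1], E_up[i // n1]])

E_prime = np.stack([axial_encoding(i) for i in range(n1 * n2)])
print(E_prime.shape)               # (49, 4),而参数量只有 7*1 + 7*3 = 28 个
```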
这里的关键是,轴向位置编码能够从设计上确保向量 $\left[\mathbf{e'}_1, \ldots, \mathbf{e'}_{n_{\text{max} }}\right]$ 之间各不相等,并且使编码矩阵的大小从 $n_{\text{max}} \times d_{h}$ 减小到 $n_{\text{max}}^1 \times d_{h}^1 + n_\text{max}^2 \times d_{h}^2$。因为设计上允许每个轴向位置编码向量不同,所以一旦模型中的轴向位置编码训出来后,模型就可以灵活高效地获取位置编码。
为了证明位置编码矩阵的尺寸得到了大幅减小,假设我们为 Reformer 模型设置了参数 `config.axial_pos_shape = [1024, 512]` 以及 `config.axial_pos_embds_dim = [512, 512]` ,且该模型支持的最长输入序列长度为 0.5M 词元。此时,生成的轴向位置编码矩阵的参数量仅为 $1024 \times 512 + 512 \times 512 \sim 800K$,即大约 3MB。这个数字与标准位置编码矩阵所需的 2GB 相比,简直是小巫见大巫。
如需更简洁、更数学化的解释,请参阅 [此处](https://huggingface.co/transformers/model_doc/reformer.html#axis-positional-encodings) 的 🤗Transformers 文档。
### 基准测试
最后,我们对传统位置嵌入与 _轴向位置嵌入_ 的峰值内存消耗进行比较。
```
#@title Installs and Imports
# pip installs
!pip -qq install git+https://github.com/huggingface/transformers.git
!pip install -qq py3nvml
from transformers import ReformerConfig, PyTorchBenchmark, PyTorchBenchmarkArguments, ReformerModel
```
位置嵌入仅取决于两个配置参数: 输入序列允许的最大长度 `config.max_position_embeddings` 以及 `config.hidden_size` 。我们使用一个模型,其支持的输入序列的最大允许长度为 50 万个词元,即 `google/reformer-crime-and-punishment` ,来看看使用轴向位置嵌入后的效果。
首先,我们比较轴向位置编码与标准位置编码的参数形状,及其相应模型的总参数量。
```
config_no_pos_axial_embeds = ReformerConfig.from_pretrained("google/reformer-crime-and-punishment", axial_pos_embds=False) # disable axial positional embeddings
config_pos_axial_embeds = ReformerConfig.from_pretrained("google/reformer-crime-and-punishment", axial_pos_embds=True, axial_pos_embds_dim=(64, 192), axial_pos_shape=(512, 1024)) # enable axial positional embeddings
print("Default Positional Encodings")
print(20 *'-')
model = ReformerModel(config_no_pos_axial_embeds)
print(f"Positional embeddings shape: {model.embeddings.position_embeddings}")
print(f"Num parameters of model: {model.num_parameters()}")
print(20 *'-' + '\n\n')
print("Axial Positional Encodings")
print(20 *'-')
model = ReformerModel(config_pos_axial_embeds)
print(f"Positional embeddings shape: {model.embeddings.position_embeddings}")
print(f"Num parameters of model: {model.num_parameters()}")
print(20 *'-' + '\n\n')
```
HBox(children=(FloatProgress(value=0.0, description='Downloading', max=1151.0, style=ProgressStyle(description…
Default Positional Encodings
--------------------
Positional embeddings shape: PositionEmbeddings(
(embedding): Embedding(524288, 256)
)
Num parameters of model: 136572416
--------------------
Axial Positional Encodings
--------------------
Positional embeddings shape: AxialPositionEmbeddings(
(weights): ParameterList(
(0): Parameter containing: [torch.FloatTensor of size 512x1x64]
(1): Parameter containing: [torch.FloatTensor of size 1x1024x192]
)
)
Num parameters of model: 2584064
--------------------
理解了相应的理论后,读者应该不会对轴向位置编码权重的形状感到惊讶。
从结果中可以看出,对于需要处理如此长输入序列的模型,使用标准位置编码是不切实际的。以 `google/reformer-crime-and-punishment` 为例,仅标准位置编码自身参数量就超过 100M。轴向位置编码可以将这个数字减少到略高于 200K。
最后,我们比较一下推理所需内存。
```
benchmark_args = PyTorchBenchmarkArguments(sequence_lengths=[512], batch_sizes=[8], models=["Reformer-No-Axial-Pos-Embeddings", "Reformer-Axial-Pos-Embeddings"], no_speed=True, no_env_print=True)
benchmark = PyTorchBenchmark(configs=[config_no_pos_axial_embeds, config_pos_axial_embeds], args=benchmark_args)
result = benchmark.run()
```
1 / 2
2 / 2
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
Reformer-No-Axial-Pos-Embeddin 8 512 959
Reformer-Axial-Pos-Embeddings 8 512 447
--------------------------------------------------------------------------------
可以看出,在 `google/reformer-crime-and-punishment` 模型上,使用轴向位置嵌入可减少大约一半的内存需求。
| 6 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/deploy-deepfloydif-using-bentoml.md | ---
title: "使用 BentoML 部署 🤗 Hugging Face 上的模型:DeepFloyd IF 实战"
thumbnail: /blog/assets/deploy-deepfloydif-using-bentoml/thumbnail.png
authors:
- user: Sherlockk
guest: true
- user: larme
guest: true
translators:
- user: Sherlockk
---
# 使用 BentoML 部署 🤗 Hugging Face 上的模型:DeepFloyd IF 实战
Hugging Face 的 Model Hub 可以让我们轻松地上传、分享和部署模型,为开发者们节省了从头开始训练模型所需的时间和计算资源。然而,在真实世界的生产环境中或以云原生的方式部署模型则仍然可能带来挑战。
在这一方面,不妨试试 BentoML。BentoML 是一个用于机器学习(ML)模型服务和部署的开源平台,提供了统一的框架,以构建、传输和扩展各类生产就绪的 AI 应用程序,支持传统、预训练、生成式以及大语言模型等各类模型。BentoML 框架的运作原理大致如下:
1. **定义模型**:使用 BentoML 之前,需要准备一个或多个机器学习模型。模型可以使用 TensorFlow 和 PyTorch 等机器学习库进行训练。
2. **保存模型**:模型训练后,将其保存到 BentoML 的本地模型仓库(Model Store)中,这样便于管理所有本地的模型,之后可随时访问并提供服务。
3. **创建 BentoML 服务**: 创建 service.py 文件来包装模型并定义服务逻辑。[服务](https://docs.bentoml.org/en/latest/concepts/service.html) 中会为模型定义 [Runner](https://docs.bentoml.org/en/latest/concepts/runner.html) 以便于大规模的模型推断,并暴露对外 API,定义输入和输出的处理逻辑。
4. **构建 Bento**: 通过创建一个 YAML 配置文件(`bentofile.yaml`),将所有模型和服务打包成一个可部署的制品,即 [Bento](https://docs.bentoml.org/en/latest/concepts/bento.html),其中包含所有代码和依赖文件等。
5. **部署 Bento**:Bento 准备好后,可以将 Bento 容器化以创建 Docker 镜像并在 K8s 上运行。或者,直接将 Bento 部署到 BentoCloud 或 Yatai,以便于在 K8s 上自动化部署流程,进行大规模运行。
在这篇博客文章中,我们将展示如何按照上述工作流程来通过 BentoML 运行 [DeepFloyd IF](https://huggingface.co/docs/diffusers/api/pipelines/if)。
## DeepFloyd IF 简介
DeepFloyd IF 是一个开源的文本到图像生成式模型。其独特的运行机制和架构让它与 Stable Diffusion 等潜在扩散模型(Latent Diffusion Model)区别开来。
DeepFloyd IF 提供了高度的照片真实性,能理解复杂的语言。与 Stable Diffusion 不同,DeepFloyd IF 工作时直接作用于像素维度,其模块化的结构包括一个文本编码器和三个级联像素扩散模块。每个模块在过程中都起到独特的作用:第一阶段负责创建一个基本的 64x64 像素图像,然后在第二和第三阶段逐步放大到 1024x1024 像素。DeepFloyd IF 独特性的另一个关键因素是其集成了大语言模型(T5-XXL-1.1)来编码提示词,这让它对复杂的提示词也能有良好的理解能力。更多信息请参阅 [Stability AI 这篇关于 DeepFloyd IF 的博客文章](https://stability.ai/blog/deepfloyd-if-text-to-image-model)。
为了确保基于 DeepFloyd IF 的应用程序在生产中能高性能地运行,你可能需要巧妙地去分配和管理资源。在这方面,BentoML 支持独立地为每个阶段去扩展 Runner。例如,你可以为第一阶段的 Runner 使用更多的 Pod 或为它们分配性能更强的 GPU 服务器。
## 准备环境
[此 GitHub 仓库](https://github.com/bentoml/IF-multi-GPUs-demo) 中存储了此项目的所有文件。要在本地运行此项目,请确保先满足以下条件:
- 已安装 Python 3.8 及以上版本
- 已安装 `pip`
- 运行机器至少具备 2 个 16 GB VRAM 的 GPU 或 1 个 40 GB VRAM 的 GPU。对于此项目,我们使用了 Google Cloud 的 `n1-standard-16` 机型,加上 64 GB 的 RAM 和 2 个 NVIDIA T4 GPU。请注意,虽然可以在单个 T4 上运行 DeepFloyd IF,但不建议用于生产级服务。
满足条件后,将项目仓库克隆到本地机器并进入项目目录。
```bash
git clone https://github.com/bentoml/IF-multi-GPUs-demo.git
cd IF-multi-GPUs-demo
```
在构建应用程序前,我们先简要浏览一下此目录中的主要文件:
- `import_models.py`: 指定 [`IFPipeline`](https://huggingface.co/docs/diffusers/api/pipelines/if) 中每个阶段的模型。此文件将所有模型下载到本地,以便将它们打包成一个 Bento;
- `requirements.txt`: 定义此项目所需的所有包和依赖;
- `service.py`: 定义 BentoML 服务,该服务使用 `to_runner` 创建三个 Runner,并暴露一个用于生成图像的 API。该 API 输入为 JSON 对象(即提示词和相反提示词),在使用一系列模型后输出图像;
- `start-server.py`: 通过在 `service.py` 中定义的服务启动一个 BentoML HTTP 服务器,并创建一个 Gradio Web UI,用于输入提示词生成图像;
- `bentofile.yaml`: 定义要构建的 Bento 的元数据,包括服务、Python 包和模型等。
建议创建一个虚拟环境用于依赖项隔离。例如,运行以下命令创建并激活名为 `venv` 的虚拟环境:
```bash
python -m venv venv
source venv/bin/activate
```
安装所需的依赖项:
```bash
pip install -r requirements.txt
```
如果之前没有使用过命令行从 Hugging Face 下载模型,你必须先登录:
```bash
pip install -U huggingface_hub
huggingface-cli login
```
## 下载模型到 BentoML 模型仓库
如上所述,你需要下载每个 DeepFloyd IF 阶段所使用的模型。设置好环境后,运行以下命令将模型下载到你的本地模型仓库,该过程可能需要一些时间。
```bash
python import_models.py
```
下载完成后,查看模型仓库中的模型。
```bash
$ bentoml models list
Tag Module Size Creation Time
sd-upscaler:bb2ckpa3uoypynry bentoml.diffusers 16.29 GiB 2023-07-06 10:15:53
if-stage2:v1.0 bentoml.diffusers 13.63 GiB 2023-07-06 09:55:49
if-stage1:v1.0 bentoml.diffusers 19.33 GiB 2023-07-06 09:37:59
```
## 启动 BentoML 服务
可以直接使用 `start-server.py` 文件启动 BentoML HTTP 服务器,运行 Gradio Web UI,这也是该应用程序的入口。可以通过各种选项来自定义运行环境,管理不同阶段的 GPU 资源分配。根据你的 GPU 设置,你可能会使用不同的命令:
- 对于具有超过 40 GB VRAM 的 GPU,可在同一 GPU 上运行所有模型:
```bash
python start-server.py
```
- 如果有两个 Tesla T4 且每个具有 15 GB VRAM,可将第一个 GPU 分配给第一阶段模型,将第二个 GPU 分配给第二和第三阶段模型:
```bash
python start-server.py --stage1-gpu=0 --stage2-gpu=1 --stage3-gpu=1
```
- 如果一个 Tesla T4 具有 15 GB VRAM 而另两个 GPU 的 VRAM 较小,可将 T4 分配给第一阶段模型,第二和第三个 GPU 分别分配给第二和第三阶段模型:
```bash
python start-server.py --stage1-gpu=0 --stage2-gpu=1 --stage3-gpu=2
```
要查看所有可自定义的选项(如服务器的端口),可运行以下命令:
```bash
python start-server.py --help
```
## 测试服务器
服务器启动后,可以通过 http://localhost:7860 访问该 Web UI。
BentoML API Endpoint 也可在 http://localhost:3000 访问。以下是提示词和相反提示词示例。
提示词(Prompt):
> orange and black, head shot of a woman standing under street lights, dark theme, Frank Miller, cinema, ultra realistic, ambiance, insanely detailed and intricate, hyper realistic, 8k resolution, photorealistic, highly textured, intricate details
相反提示词(Negative Prompt):
> tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of frame, blurry, bad art, bad anatomy, blurred, text, watermark, grainy
输出结果:

## 构建 Bento 并提供服务
成功在本地运行 DeepFloyd IF 后,可在项目目录下运行以下命令将其打包成 Bento。
```bash
$ bentoml build
Converting 'IF-stage1' to lowercase: 'if-stage1'.
Converting 'IF-stage2' to lowercase: 'if-stage2'.
Converting DeepFloyd-IF to lowercase: deepfloyd-if.
Building BentoML service "deepfloyd-if:6ufnybq3vwszgnry" from build context "/Users/xxx/Documents/github/IF-multi-GPUs-demo".
Packing model "sd-upscaler:bb2ckpa3uoypynry"
Packing model "if-stage1:v1.0"
Packing model "if-stage2:v1.0"
Locking PyPI package versions.
██████╗░███████╗███╗░░██╗████████╗░█████╗░███╗░░░███╗██╗░░░░░
██╔══██╗██╔════╝████╗░██║╚══██╔══╝██╔══██╗████╗░████║██║░░░░░
██████╦╝█████╗░░██╔██╗██║░░░██║░░░██║░░██║██╔████╔██║██║░░░░░
██╔══██╗██╔══╝░░██║╚████║░░░██║░░░██║░░██║██║╚██╔╝██║██║░░░░░
██████╦╝███████╗██║░╚███║░░░██║░░░╚█████╔╝██║░╚═╝░██║███████╗
╚═════╝░╚══════╝╚═╝░░╚══╝░░░╚═╝░░░░╚════╝░╚═╝░░░░░╚═╝╚══════╝
Successfully built Bento(tag="deepfloyd-if:6ufnybq3vwszgnry").
```
查看本地 Bento 仓库中的 Bento:
```bash
$ bentoml list
Tag Size Creation Time
deepfloyd-if:6ufnybq3vwszgnry 49.25 GiB 2023-07-06 11:34:52
```
可以在生产环境中使用该 Bento 提供服务:
```bash
bentoml serve deepfloyd-if:6ufnybq3vwszgnry
```
要以更云原生的方式部署 Bento,运行以下命令生成 Docker 镜像:
```bash
bentoml containerize deepfloyd-if:6ufnybq3vwszgnry
```
之后可以在 K8s 上部署该模型。
## 下一步计划
[BentoML](https://github.com/bentoml/BentoML) 为生产部署 Hugging Face 模型提供了高效且简单的方法。它支持一系列 ML 框架,提供易于使用的 API,可以在短时间内将模型部署到生产环境。无论是 DeepFloyd IF 还是 Hugging Face Model Hub 上的任何其他模型,BentoML 都可以帮助快速部署模型到生产环境。
感兴趣的朋友可以查看以下资源,了解 BentoML 及其生态系统,关注有关 BentoML 的更多信息。
- [OpenLLM](https://github.com/bentoml/OpenLLM) - 在生产环境中运行和管理大语言模型的开源平台;
- [StableDiffusion](https://github.com/bentoml/stable-diffusion-bentoml) - 在生产环境中运行和管理扩散模型的开源平台;
- [Transformer NLP Service](https://github.com/bentoml/transformers-nlp-service) - Transformer NLP 模型的在线推理 API;
- 加入 [BentoML 在 Slack 上的线上社区](https://l.bentoml.com/join-slack);
- 在 [Twitter](https://twitter.com/bentomlai) 和 [LinkedIn](https://www.linkedin.com/company/bentoml/) 关注 BentoML
本文(中文版)[原载](https://mp.weixin.qq.com/s/I1vZhAqZocQELAiLmj2aJQ) 于公众号「语言挖掘师」,为本文英文原文作者、本文译者的内容发布平台。 | 7 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/speecht5.md | ---
title: "使用 SpeechT5 进行语音合成、识别和更多功能"
thumbnail: /blog/assets/speecht5/thumbnail.png
authors:
- user: Matthijs
translators:
- user: innovation64
---
# 使用 SpeechT5 进行语音合成、识别和更多功能
我们很高兴地宣布,SpeechT5 现已在 🤗 Transformers(一个提供最前沿机器学习模型实现的开源库)中可用。
SpeechT5 最初见于微软亚洲研究院的这篇论文 [SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing](https://arxiv.org/abs/2110.07205)。论文作者发布的 [官方检查点](https://github.com/microsoft/SpeechT5) 可在 Hugging Face Hub 上找到。
如果您想直接尝试,这里有一些在 Spaces 上的演示:
- [语音合成 (TTS)](https://huggingface.co/spaces/Matthijs/speecht5-tts-demo)
- [语音转换](https://huggingface.co/spaces/Matthijs/speecht5-vc-demo)
- [自动语音识别](https://huggingface.co/spaces/Matthijs/speecht5-asr-demo)
## 介绍
SpeechT5 不是一种,也不是两种,而是一种架构中的三种语音模型。
它可以做:
- **语音到文本** 用于自动语音识别或说话人识别;
- **文本转语音** 用于合成音频;
- **语音到语音** 用于在不同语音之间进行转换或执行语音增强。
SpeechT5 背后的主要思想是在文本到语音、语音到文本、文本到文本和语音到语音数据的混合体上预训练单个模型。这样,模型可以同时从文本和语音中学习。这种预训练方法的结果是一个模型,该模型具有由文本和语音共享的隐藏表示的 **统一空间**。
SpeechT5 的核心是一个常规的 **Transformer 编码器 - 解码器** 模型。就像任何其他 Transformer 一样,编码器 - 解码器网络使用隐藏表示对序列到序列的转换进行建模。这个 Transformer 骨干对于所有 SpeechT5 任务都是一样的。
为了使同一个 Transformer 可以同时处理文本和语音数据,模型中添加了所谓的 **pre-nets** 和 **post-nets**。pre-net 的工作是将输入文本或语音转换为 Transformer 使用的隐藏表示;post-net 则从 Transformer 获取输出并将其再次转换为文本或语音。
下图展示了 SpeechT5 的架构 (摘自 [原始论文](https://arxiv.org/abs/2110.07205))。
<div align="center">
<img alt="SpeechT5 architecture diagram" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/architecture.jpg"/>
</div>
在预训练期间,同时使用所有的 pre-nets 和 post-nets。预训练后,整个编码器 - 解码器主干在单个任务上进行微调。这种经过微调的模型仅使用特定于给定任务的 pre-nets 和 post-nets。例如,要将 SpeechT5 用于文本到语音转换,您需要换上用于文本输入的文本编码器 pre-net,以及用于语音输出的语音解码器 pre-net 和 post-net。
注意: 即使各个微调模型一开始使用的是共享预训练模型的同一组权重,微调后的最终版本彼此还是完全不同。例如,您不能拿一个经过微调的 ASR 模型换掉它的 pre-nets 和 post-nets 来得到一个有效的 TTS 模型。SpeechT5 很灵活,但不是 *那么* 灵活。
## 文字转语音
SpeechT5 是我们添加到 🤗 Transformers 的 **第一个文本转语音模型**,我们计划在不久的将来添加更多的 TTS 模型。
对于 TTS 任务,该模型使用以下 pre-net 和 post-net:
- **文本编码器 pre-net。** 一个文本嵌入层,将文本标记映射到编码器期望的隐藏表示。类似于 BERT 等 NLP 模型中发生的情况。
- **语音解码器 pre-net。** 它以对数梅尔频谱图作为输入,并使用一系列线性层将频谱图压缩为隐藏表示。此设计取自 Tacotron 2 TTS 模型。
- **语音解码器 post-net。** 这预测了一个残差以添加到输出频谱图中并用于改进结果,同样来自 Tacotron 2。
微调模型的架构如下所示。
<div align="center">
<img alt="SpeechT5 architecture for text-to-speech" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/tts.jpg"/>
</div>
以下是如何使用 SpeechT5 文本转语音模型合成语音的完整示例。您还可以在 [交互式 Colab 笔记本](https://colab.research.google.com/drive/1XnOnCsmEmA3lHmzlNRNxRMcu80YZQzYf?usp=sharing) 中进行操作。
SpeechT5 在最新版本的 Transformers 中尚不可用,因此您必须从 GitHub 安装它。同时还要安装附加依赖 sentencepiece,然后重新启动运行时。
```bash
pip install git+https://github.com/huggingface/transformers.git
pip install sentencepiece
```
首先,我们从 Hub 加载 [微调模型](https://huggingface.co/microsoft/speecht5_tts),以及用于标记化和特征提取的处理器对象。我们将使用的类是 `SpeechT5ForTextToSpeech`。
```python
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
```
接下来,标记输入文本。
```python
inputs = processor(text="Don't count the days, make the days count.", return_tensors="pt")
```
SpeechT5 TTS 模型不限于为单个说话者创建语音。相反,它使用所谓的 **speaker embeddings** 来捕捉特定说话者的语音特征。我们将从 Hub 上的数据集中加载这样一个 Speaker Embeddings。
```python
from datasets import load_dataset
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
import torch
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
```
**speaker embeddings** 是形状为 (1, 512) 的张量。这个特定的 speaker embedding 描述了女性的声音。这些嵌入是使用 [此脚本](https://huggingface.co/mechanicalsea/speecht5-vc/blob/main/manifest/utils/prep_cmu_arctic_spkemb.py) 从 [CMU ARCTIC](http://www.festvox.org/cmu_arctic/) 数据集获得的,任何 X-Vector 嵌入都应该有效。
现在我们可以告诉模型在给定输入标记和 Speaker Embeddings 的情况下生成语音。
```python
spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
```
这会输出一个形状为 (140, 80) 的张量,其中包含对数梅尔谱图。第一个维度是序列长度,它可能在运行之间有所不同,因为语音解码器 per-net 总是对输入序列应用 dropout。这为生成的语音增加了一些随机变化。
要将预测的对数梅尔声谱图转换为实际的语音波形,我们需要一个 **vocoder**。理论上,您可以使用任何适用于 80-bin 梅尔声谱图的声码器,但为了方便起见,我们在基于 HiFi-GAN 的 Transformers 中提供了一个。此 [声码器的权重](https://huggingface.co/mechanicalsea/speecht5-tts),以及微调 TTS 模型的权重,由 SpeechT5 的原作者友情提供。
加载声码器与任何其他 🤗 Transformers 模型一样简单。
```python
from transformers import SpeechT5HifiGan
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
```
要从频谱图中制作音频,请执行以下操作:
```python
with torch.no_grad():
speech = vocoder(spectrogram)
```
我们还提供了一个快捷方式,因此您不需要制作频谱图的中间步骤。当您将声码器对象传递给 generate_speech 时,它会直接输出语音波形。
```python
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
```
最后,将语音波形保存到文件中。SpeechT5 使用的采样率始终为 16 kHz。
```python
import soundfile as sf
sf.write("tts_example.wav", speech.numpy(), samplerate=16000)
```
输出听起来 [像这样](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/tts_example.wav):
<audio controls>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/tts_example.wav" type="audio/wav">
Your browser does not support the audio element.
</audio>
这就是 TTS 模型!使这个声音好听的关键是使用正确的 speaker embeddings。
您可以在 Spaces 上进行 [交互式演示](https://huggingface.co/spaces/Matthijs/speecht5-tts-demo)。
## 语音转语音的语音转换
从概念上讲,使用 SpeechT5 进行语音转语音建模与文本转语音相同。只需将文本编码器 pre-net 换成语音编码器 pre-net 即可。模型的其余部分保持不变。
<div align="center">
<img alt="SpeechT5 architecture for speech-to-speech" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/s2s.jpg"/>
</div>
**语音编码器 pre-net** 与 [wav2vec 2.0](https://huggingface.co/docs/transformers/model_doc/wav2vec2) 中的特征编码模块相同。它由卷积层组成,这些卷积层将输入波形下采样为一系列音频帧表示。
作为语音到语音任务的示例,SpeechT5 的作者提供了一个 [微调检查点](https://huggingface.co/microsoft/speecht5_vc) 用于进行语音转换。要使用它,首先从 Hub 加载模型。请注意,模型类现在是 `SpeechT5ForSpeechToSpeech`。
```python
from transformers import SpeechT5Processor, SpeechT5ForSpeechToSpeech
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")
```
我们需要一些语音音频作为输入。出于本示例的目的,我们将从 Hub 上的小型语音数据集加载音频。您也可以加载自己的语音波形,只要它们是单声道的并且使用 16 kHz 的采样率即可。我们在这里使用的数据集中的样本已经采用这种格式。
```python
from datasets import load_dataset
dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
dataset = dataset.sort("id")
example = dataset[40]
```
接下来,对音频进行预处理,使其采用模型期望的格式。
```python
sampling_rate = dataset.features["audio"].sampling_rate
inputs = processor(audio=example["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
```
与 TTS 模型一样,我们需要 speaker embeddings。这些描述了目标语音听起来像什么。
```python
import torch
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
```
我们还需要加载声码器以将生成的频谱图转换为音频波形。让我们使用与 TTS 模型相同的声码器。
```python
from transformers import SpeechT5HifiGan
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
```
现在我们可以通过调用模型的 `generate_speech` 方法来执行语音转换。
```python
speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder)
import soundfile as sf
sf.write("speech_converted.wav", speech.numpy(), samplerate=16000)
```
更改为不同的声音就像加载新的 Speaker Embeddings 一样简单。您甚至可以嵌入自己的声音!
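接着上面的代码,下面是一个简单的示意 (其中的索引 1234 为任意选取,数据集中的任何一条 X-Vector 都可以):
```python
# 从同一数据集中任取另一条 X-Vector 作为新的目标音色 (索引仅为示例)
new_speaker_embeddings = torch.tensor(embeddings_dataset[1234]["xvector"]).unsqueeze(0)
speech = model.generate_speech(inputs["input_values"], new_speaker_embeddings, vocoder=vocoder)
sf.write("speech_converted_new_voice.wav", speech.numpy(), samplerate=16000)
```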
原始输入 ([下载链接](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/speech_original.wav)):
<audio controls>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/speech_original.wav" type="audio/wav">
Your browser does not support the audio element.
</audio>
转换后的语音: ([下载链接](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/speech_converted.wav)):
<audio controls>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/speech_converted.wav" type="audio/wav">
Your browser does not support the audio element.
</audio>
请注意,此示例中转换后的音频在句子结束前被切断。这可能是由于两个句子之间的停顿导致 SpeechT5 (错误地) 预测已经到达序列的末尾。换个例子试试,你会发现转换通常是正确的,但有时会过早停止。
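例如,可以接着上面的代码换一个样本重新运行 (索引 42 仅为任意示例):
```python
# 换一个语音样本再试一次 (索引仅为示例)
example = dataset[42]
inputs = processor(audio=example["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder)
sf.write("speech_converted_2.wav", speech.numpy(), samplerate=16000)
```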
您可以进行 [交互式演示](https://huggingface.co/spaces/Matthijs/speecht5-vc-demo)。🔥
## 用于自动语音识别的语音转文本
ASR 模型使用以下 pre-nets 和 post-net:
- **语音编码器 pre-net。** 这与语音到语音模型使用的预网相同,由来自 wav2vec 2.0 的 CNN 特征编码器层组成。
- **文本解码器 pre-net。** 与 TTS 模型使用的编码器预网类似,它使用嵌入层将文本标记映射到隐藏表示中。(在预训练期间,这些嵌入在文本编码器和解码器预网之间共享。)
- **文本解码器 post-net。** 这是其中最简单的一个,由一个线性层组成,该层将隐藏表示投射到词汇表上的概率。
微调模型的架构如下所示。
<div align="center">
<img alt="SpeechT5 architecture for speech-to-text" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/speecht5/asr.jpg"/>
</div>
如果您之前尝试过任何其他 🤗 Transformers 语音识别模型,您会发现 SpeechT5 同样易于使用。最快的入门方法是使用流水线。
```python
from transformers import pipeline
generator = pipeline(task="automatic-speech-recognition", model="microsoft/speecht5_asr")
```
作为语音音频,我们将使用与上一节相同的输入,任何音频文件都可以使用,因为流水线会自动将音频转换为正确的格式。
```python
from datasets import load_dataset
dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
dataset = dataset.sort("id")
example = dataset[40]
```
现在我们可以要求流水线处理语音并生成文本转录。
```python
transcription = generator(example["audio"]["array"])
```
打印转录输出:
```text
a man said to the universe sir i exist
```
听起来完全正确!SpeechT5 使用的分词器非常基础,以字符级别工作,因此 ASR 模型不会输出任何标点符号或大写字母。
当然也可以直接使用模型类。首先,加载 [微调模型](https://huggingface.co/microsoft/speecht5_asr) 和处理器对象。该类现在是 `SpeechT5ForSpeechToText`。
```python
from transformers import SpeechT5Processor, SpeechT5ForSpeechToText
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")
```
预处理语音输入:
```python
sampling_rate = dataset.features["audio"].sampling_rate
inputs = processor(audio=example["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
```
最后,告诉模型从语音输入中生成文本标记,然后使用处理器的解码功能将这些标记转换为实际文本。
```python
predicted_ids = model.generate(**inputs, max_length=100)
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
```
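`batch_decode` 返回的是普通的字符串列表,直接打印即可;对于上面使用的样本,输出应与前文流水线的结果一致:
```python
print(transcription[0])
# 预期输出: a man said to the universe sir i exist
```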
播放 [语音到文本任务](https://huggingface.co/spaces/Matthijs/speecht5-asr-demo) 的交互式演示。
## 结论
SpeechT5 是一个有趣的模型,因为与大多数其他模型不同,它允许您使用相同的架构执行多项任务。只有 pre-net 和 post-net 发生变化。通过在这些组合任务上对模型进行预训练,它在微调时变得更有能力完成每个单独的任务。
目前我们只介绍了语音识别 (ASR)、语音合成 (TTS) 和语音转换任务,但论文还提到该模型已成功用于语音翻译、语音增强和说话者识别。如此广泛的用途,前途不可估量!
| 8 |
0 | hf_public_repos/blog | hf_public_repos/blog/zh/large-language-models.md | ---
title: "大语言模型:新的摩尔定律?"
thumbnail: /blog/assets/33_large_language_models/01_model_size.jpg
authors:
- user: juliensimon
translators:
- user: MatrixYao
- user: zhongdongy
proofreader: true
---
# 大语言模型:新的摩尔定律?
不久前,微软和 Nvidia [推出了](https://www.microsoft.com/en-us/research/blog/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model/) Megatron-Turing NLG 530B,一种基于 Transformer 的模型,被誉为是 “*世界上最大且最强的生成语言模型*”。
毫无疑问,此项成果对于机器学习工程来讲是一场令人印象深刻的能力展示,表明我们的工程能力已经能够训练如此巨大的模型。然而,我们应该为这种超级模型的趋势感到兴奋吗?我个人倾向于否定的回答。我将通过本文阐述我的理由。
<kbd>
<img src="../assets/33_large_language_models/01_model_size.jpg">
</kbd>
### 这是你的深度学习大脑
研究人员估计,人脑平均包含 [860](https://pubmed.ncbi.nlm.nih.gov/19226510/) 亿个神经元和 100 万亿个突触。可以肯定的是,这里面并非所有的神经元和突触都用于语言。有趣的是,GPT-4 [预计](https://www.wired.com/story/cerebras-chip-cluster-neural-networks-ai/) 有大约 100 万亿个参数...... 虽然这个类比很粗略,但难道我们不应该怀疑一下构建与人脑大小相当的语言模型长期来讲是否是最佳方案?
当然,我们的大脑是一个了不起的器官,它经过数百万年的进化而产生,而深度学习模型仅有几十年的历史。不过,我们的直觉告诉我们: 有些东西无法计算 (这是个双关语 :))。
### 深度学习,深度销金窟?
如你所料,在庞大的文本数据集上训练一个 5300 亿参数的模型需要相当多的基础设施。事实上,Microsoft 和 Nvidia 使用了数百台 DGX A100 GPU 服务器,每台 19 万 9 千美元。如果再把网络设备、托管成本等因素考虑进去的话,任何想要重现该实验的组织或个人都必须花费近 1 亿美元。来根薯条压压惊?
说真的,有哪些组织有值得花费 1 亿美元来构建深度学习基础设施的业务?再退一步,又有哪些组织有值得花费 1000 万美元构建基础设施的业务?很少。既然很少,那么请问,这些模型为谁而生呢?
### GPU 集群的热
尽管训练大模型需要杰出的工程能力,但在 GPU 上训练深度学习模型本身却是一种蛮力技术。根据规格表,每台 DGX 服务器可消耗高达 6.5 千瓦的功率。同时,数据中心 (或服务器机柜) 至少需要同样多的冷却能力。除非你是史塔克家族的人 (Starks) ,需要在冬天让临冬城 (Winterfell) 保持温暖,否则你必须处理散热问题。
此外,随着公众对气候和社会责任问题意识的增强,还需要考虑碳足迹问题。根据马萨诸塞大学 2019 年的一项 [研究](https://arxiv.org/pdf/1906.02243.pdf),“*在 GPU 上训练一次 BERT 产生的碳足迹大致与一次跨美飞行相当*”。
BERT-Large 有 3.4 亿个参数。我们可以据此推断 Megatron-Turing 的碳足迹大致会有多大……认识我的人都知道,我并不是一个热血环保主义者。尽管如此,这些数字也不容忽视。
### 所以呢?
我对 Megatron-Turing NLG 530B 和接下来可能会出现的模型巨兽感到兴奋吗?不。我认为值得增加成本、复杂性以及碳足迹去换取 (相对较小的) 测试基准上的改进吗?不。我认为构建和推广这些庞大的模型能帮助组织理解和应用机器学习吗?不。
我想知道这一切有什么意义。为了科学而科学?好的老营销策略?技术至上?可能每个都有一点。如果是这些意义的话,我就不奉陪了。
相反,我更专注于实用且可操作的技术,大家都可以使用这些技术来构建高质量的机器学习解决方案。
### 使用预训练模型
在绝大多数情况下,你不需要自定义模型架构。也许你会 *想要* 自己定制一个模型架构 (这是另一回事),但请注意此处猛兽出没,仅限资深玩家!
一个好的起点是寻找已经针对你要解决的任务预训练过的 [模型](https://huggingface.co/models) (例如,[英文文本摘要](https://huggingface.co/models?language=en&pipeline_tag=summarization&sort=downloads)) 。
然后,你应该快速尝试一些模型,用它们来预测你自己的数据。如果指标效果不错,那么打完收工!如果还需要更高一点的准确率,你应该考虑对模型进行微调 (稍后会详细介绍) 。
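下面是一个最简示例 (其中的模型名称与输入文本仅作演示,可以换成你在 Hub 上找到的任何摘要模型):
```python
from transformers import pipeline

# 直接加载一个现成的英文摘要模型 (模型名仅为示例)
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")

text = "Large language models keep getting bigger, but smaller pretrained models can already solve many business problems at a fraction of the cost."
print(summarizer(text, max_length=40, min_length=10)[0]["summary_text"])
```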
### 使用较小的模型
在评估模型时,你应该从那些精度满足要求的模型中选择尺寸最小的那个。它预测得更快,并且需要更少的硬件资源来进行训练和推理。节俭需要从一开始就做起。
这其实也不算什么新招。计算机视觉从业者会记得 [SqueezeNet](https://arxiv.org/abs/1602.07360) 2017 年问世时,与 [AlexNet](https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html) 相比,模型尺寸减少了 50 倍,而准确率却与 AlexNet 相当甚至更高。多聪明!
自然语言处理社区也在致力于使用迁移学习技术缩减模型尺寸,如使用 [知识蒸馏技术](https://en.wikipedia.org/wiki/Knowledge_distillation)。[DistilBERT](https://arxiv.org/abs/1910.01108) 也许是其中最广为人知的工作。与原始 BERT 模型相比,它保留了 97% 的语言理解能力,同时尺寸缩小了 40%,速度提高了 60%。你可以在 [Hugging Face](https://huggingface.co/distilbert-base-uncased) 上尝试一下 DistilBERT。同样的方法也已经应用于其他模型,例如 Facebook 的 [BART](https://arxiv.org/abs/1910.13461),你可以 [在 Hugging Face 尝试 DistilBART](https://huggingface.co/models?search=distilbart)。
[Big Science](https://bigscience.huggingface.co/) 项目的最新模型也令人印象深刻。下面这张来自于 [论文](https://arxiv.org/abs/2110.08207) 的图表明,他们的 T0 模型在许多任务上都优于 GPT-3,同时尺寸小 16 倍。
<kbd>
<img src="../assets/33_large_language_models/02_t0.png">
</kbd>
你可以在 [Hugging Face](https://huggingface.co/bigscience/T0pp) 上尝试 T0。我们需要更多的此类研究!
### 微调模型
如果你需要特化一个模型,你不应该从头开始训练它。相反,你应该对其进行微调,也就是说,仅针对你自己的数据训练几个回合。如果你缺少数据,也许 [这些数据集](https://huggingface.co/datasets) 中的某个可以帮助你入门。
猜对了,这是进行迁移学习的另一种方式,它会帮助你节省一切!
- 收集、存储、清理和标注的数据更少,
- 更快的实验和迭代,
- 生产过程所需的资源更少。
换句话说: 节省时间,节省金钱,节省硬件资源,拯救世界!
如果你需要教程,Hugging Face [课程](https://huggingface.co/course)可以帮助你立即入门。
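作为参考,下面给出一个用 `Trainer` 微调小模型的简化示意 (其中的模型、数据集和超参数均为任意示例,请替换为你自己的数据):
```python
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

model_id = "distilbert-base-uncased"   # 示例: 一个较小的预训练模型
dataset = load_dataset("imdb")         # 示例数据集, 请替换为你自己的数据
tokenizer = AutoTokenizer.from_pretrained(model_id)

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=256)

encoded = dataset.map(tokenize, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=2)

args = TrainingArguments(output_dir="finetuned-model", num_train_epochs=2,
                         per_device_train_batch_size=16)
trainer = Trainer(model=model, args=args,
                  train_dataset=encoded["train"].shuffle(seed=42).select(range(2000)),
                  eval_dataset=encoded["test"].select(range(500)),
                  tokenizer=tokenizer)
trainer.train()
```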
### 使用云基础设施
不管你是否喜欢它们,事实是云公司懂得如何构建高效的基础设施。可持续性研究表明,基于云的基础设施比其他替代方案更节能减排: 请参阅 [AWS](https://sustainability.aboutamazon.com/environment/the-cloud)、[Azure](https://azure.microsoft.com/en-us/global-infrastructure/sustainability) 和 [Google](https://cloud.google.com/sustainability)。Earth.org [宣称](https://earth.org/environmental-impact-of-cloud-computing/),虽然云基础设施并不完美,但 “*[它] 比替代方案更节能,并促进了环境友好的服务及经济增长。*”
在易用性、灵活性和随用随付方面,云肯定有很多优势。它也比你想象的更环保。如果你的 GPU 不够用,为什么不尝试在 AWS 的机器学习托管服务 [Amazon SageMaker](https://aws.amazon.com/sagemaker/) 上微调你的 Hugging Face 模型?我们为你准备了 [大量示例](https://huggingface.co/docs/sagemaker/train)。
### 优化你的模型
从编译器到虚拟机,软件工程师长期以来一直在使用能够针对任何运行硬件自动优化代码的工具。
然而,机器学习社区仍在这个课题上苦苦挣扎,这是有充分理由的。优化模型的尺寸和速度是一项极其复杂的任务,其中涉及以下技术:
- 专用硬件加速: 如训练加速硬件 ([Graphcore](https://www.graphcore.ai/)、[Habana](https://habana.ai/)) 、推理加速硬件 ([Google TPU](https://cloud.google.com/tpu),[AWS Inferentia](https://aws.amazon.com/machine-learning/inferentia/))。
- 剪枝: 删除对预测结果影响很小或没有影响的模型参数。
- 融合: 合并模型层 (例如,卷积和激活) 。
- 量化: 以较小的位深存储模型参数 (例如,使用 8 位而不是 32 位)。
幸运的是,自动化工具开始出现,例如 [Optimum](https://huggingface.co/hardware) 开源库和 [Infinity](https://huggingface.co/infinity),Infinity 是一个最低能以 1 毫秒的延迟提供 Transformers 推理能力的容器化解决方案。
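举个例子,仅用几行 PyTorch 代码就可以对模型的线性层做动态量化 (下面只是一个通用示意,并非上述工具的具体用法):
```python
import torch
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased-finetuned-sst-2-english"
)
# 将所有 nn.Linear 层的权重动态量化为 int8, 以减小模型体积并加快 CPU 推理
quantized_model = torch.quantization.quantize_dynamic(
    model, {torch.nn.Linear}, dtype=torch.qint8
)
```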
### 结论
在过去的几年里,大语言模型的尺寸平均每年增长 10 倍。这开始看起来像另一个 [摩尔定律](https://en.wikipedia.org/wiki/Moore%27s_law)。
这条路似曾相识,我们应该知道这条路迟早会遇到收益递减、成本增加、复杂性增加等问题以及新的风险。指数式增长的结局往往不会很好。还记得 [Meltdown and Spectre](https://meltdownattack.com/) 吗?我们真的想知道人工智能领域的 Meltdown and Spectre 会是什么吗?
与其赌上你的时间、精力和金钱去追求万亿参数的模型,不如构建一些更实际、更有效、能帮助所有开发者解决现实问题的解决方案,这样不是更好吗?
*对 Hugging Face 可以在哪些方面帮助您的组织构建和部署生产级别的机器学习解决方案感兴趣?欢迎联系 [[email protected]](mailto:[email protected]) (猎头和销售勿扰哦)。*
| 9 |
0 | hf_public_repos | hf_public_repos/blog/gradio-5-security.md | ---
title: "A Security Review of Gradio 5"
thumbnail: /blog/assets/gradio-5-security/thumbnail.png
authors:
- user: abidlabs
- user: pngwn
---
# A Security Review of Gradio 5
**We audited Gradio 5 so that your machine learning apps are safe!**
In the past few years, [Gradio](https://github.com/gradio-app/gradio/) (>6 million monthly PyPI installs) has become the default way to build machine learning web applications in Python. In just a few lines of code, you can create a user interface for an image generation app, a chatbot, or any other kind of ML app _and_ share it with others using Gradio’s built-in share links or [Hugging Face Spaces](https://huggingface.co/spaces).
```py
import gradio as gr
def generate(seed, prompt):
    ...
    return image

# gr.Interface creates a web-based UI
gr.Interface(
    generate,
    inputs=[gr.Slider(), gr.Textbox()],
    outputs=[gr.Image()]
).launch(share=True)
# share=True generates a public link instantly
```
Our goal with Gradio is to allow developers to build web applications that work great out-of-the-box for machine learning use cases. This has meant letting you, as a developer, easily build applications that:
* Scale easily to large numbers of concurrent users
* Are accessible to as many users as possible
* Provide consistent UI, UX, and theming
* Work reliably across a large number of browsers and devices
...even if you are not an expert in scaling, accessibility, or UI/UX!
Now, we’re adding **web** **security** to this list. We asked [Trail of Bits](https://www.trailofbits.com/), a well-known cybersecurity company, to conduct an independent audit of Gradio. The security issues they discovered were all fixed ahead of the Gradio 5 release.
This means that machine learning apps that **you build** with Gradio 5 **will follow best practices when it comes to web security** without any significant changes to your code.
## Why a security audit?
In the past couple of years, the Gradio team has worked with the community to patch security vulnerabilities as they are discovered. But as Gradio’s popularity has grown (with >470,000 Gradio apps currently on Hugging Face Spaces), ensuring security has become even more important.
So in Gradio 5, we decided to take a different approach -- do a _preemptive_ security audit of the Gradio codebase so that your machine learning applications built with Gradio 5 are safe by default.
We asked Trail of Bits to conduct an independent and comprehensive audit of Gradio. Their team of experts in AI and Application Security identified security risks in the Gradio codebase in 4 general scenarios:
* Gradio apps running locally
* Gradio apps deployed on Hugging Face Spaces or other servers
* Gradio apps shared with built-in share links
* Supply chain vulnerabilities originating from the Gradio CI pipeline

Then, we worked closely with Trail of Bits to identify mitigation strategies for each of these risks. Gradio’s simplicity and ease of use, while beneficial for developers, also presented unique security challenges, as we didn’t want developers to need to set up complex security measures like CORS and CSP policies.
By the end of the collaboration, we fixed all of the security risks that were identified by Trail of Bits. All the fixes were validated by Trail of Bits, and are included in the Gradio 5.0 release. While it is impossible to prove the absence of security vulnerabilities, this is a major step in giving reassurance that your Gradio apps are safe.
## Major findings
We outline below the major security vulnerabilities that were discovered by Trail of Bits corresponding to the 4 scenarios above. In the interest of transparency and the spirit of open-source, we are making the [full security report public](https://github.com/trailofbits/publications/blob/master/reviews/2024-10-huggingface-gradio-securityreview.pdf), and more details for each of these issues can be found in the report.
**Gradio apps running locally**
* **TOB-GRADIO-1** and **TOB-GRADIO-2**: Misconfigurations in the server’s CORS policy that, in the context of an authenticated Gradio server, would allow attackers to steal access tokens and take over a victim’s accounts when they visit their malicious website.
**Gradio apps deployed on Hugging Face Spaces or other servers**
* **TOB-GRADIO-3**: A full read GET-based SSRF that would allow attackers to make requests to and read the responses from arbitrary endpoints, including those on the user’s internal network.
* **TOB-GRADIO-10**: Arbitrary file type uploads that would allow an attacker to host XSS payloads on a user’s Gradio server. In the context of an authenticated Gradio server, an attacker could use this to take over user accounts when the victim accesses an attacker’s malicious website.
* **TOB-GRADIO-13**: A race condition that allows an attacker to reroute user traffic to their server and steal uploaded files or chatbot conversations.
* **TOB-GRADIO-16**: Several components’ post-process functions could allow attackers to leak arbitrary files in very simple Gradio server configurations.
**Gradio apps shared with built-in share links**
* **TOB-GRADIO-19**: Remote code execution (RCE) with the root user on the Gradio API Server via a nginx misconfiguration that exposed the unauthenticated docker API. This allowed an attacker to provide a malicious host and port in step 2 of the diagram and redirect all frp tunnels to a malicious server that records all user traffic, including uploaded files and chatbox conversations.
* **TOB-GRADIO-11**: Lack of robust encryption in communications between the frp-client and frp-server, allowing attackers in a position to intercept requests (the ones from steps 6 and 7 in the diagram above) to read and modify the data going to and from the frp-server.
**Supply chain vulnerabilities originating from the Gradio CI pipeline**
* **TOB-GRADIO-25**: Several GitHub Actions workflows in the Gradio repository use third-party actions pinned to tags or branch names instead of full commit SHAs. This could allow malicious actors to silently modify actions, potentially leading to tampering with application releases or leaking secrets.
* Separately, a [GitHub security researcher reported](https://github.com/gradio-app/gradio/security/advisories/GHSA-48pj-2428-pp3w) that our GitHub actions could allow untrusted code execution and secret exfiltration if an attacker triggered a workflow and cleverly dumped the memory of GitHub runners.
## Going forward
We’re very grateful to Trail of Bits for the comprehensive security audit of Gradio and for validating the mitigations that we put in place for Gradio 5.
Going forward, we are planning to continue working with the security community to identify and mitigate security issues in Gradio. We have also added security unit tests to our test suite, fuzzer tests specifically designed to identify potential vulnerabilities, and are using static analysis tools like Semgrep in our CI to detect common security issues in our code and prevent security regressions.
We are committed to ensuring that as we continue to develop Gradio 5 ([and we have lots of plans!](https://huggingface.co/blog/gradio-5)), we do so in a manner that prioritizes security so that we can do our part in making machine learning applications better and safer.
Install Gradio 5 today:
`pip install --upgrade gradio`
And start [building your first Gradio 5 application](https://www.gradio.app/guides/quickstart).
| 0 |
0 | hf_public_repos | hf_public_repos/blog/ai-webtv.md | ---
title: "Building an AI WebTV"
thumbnail: /blog/assets/156_ai_webtv/thumbnail.gif
authors:
- user: jbilcke-hf
---
# Building an AI WebTV
The AI WebTV is an experimental demo to showcase the latest advancements in automatic video and music synthesis.
👉 Watch the stream now by going to the [AI WebTV Space](https://huggingface.co/spaces/jbilcke-hf/AI-WebTV).
If you are using a mobile device, you can view the stream from the [Twitch mirror](https://www.twitch.tv/ai_webtv).

## Concept
The motivation for the AI WebTV is to demo videos generated with open-source [text-to-video models](https://huggingface.co/tasks/text-to-video) such as Zeroscope and MusicGen, in an entertaining and accessible way.
You can find those open-source models on the Hugging Face hub:
- For video: [zeroscope_v2_576](https://huggingface.co/cerspense/zeroscope_v2_576w) and [zeroscope_v2_XL](https://huggingface.co/cerspense/zeroscope_v2_XL)
- For music: [musicgen-melody](https://huggingface.co/facebook/musicgen-melody)
The individual video sequences are purposely made to be short, meaning the WebTV should be seen as a tech demo/showreel rather than an actual show (with an art direction or programming).
## Architecture
The AI WebTV works by taking a sequence of [video shot](https://en.wikipedia.org/wiki/Shot_(filmmaking)) prompts and passing them to a [text-to-video model](https://huggingface.co/tasks/text-to-video) to generate a sequence of [takes](https://en.wikipedia.org/wiki/Take).
Additionally, a base theme and idea (written by a human) are passed through an LLM (in this case, ChatGPT) in order to generate a variety of individual prompts for each video clip.
Here's a diagram of the current architecture of the AI WebTV:

## Implementing the pipeline
The WebTV is implemented in NodeJS and TypeScript, and uses various services hosted on Hugging Face.
### The text-to-video model
The central video model is Zeroscope V2, a model based on [ModelScope](https://huggingface.co/damo-vilab/modelscope-damo-text-to-video-synthesis).
Zeroscope is comprised of two parts that can be chained together:
- A first pass with [zeroscope_v2_576](https://huggingface.co/cerspense/zeroscope_v2_576w), to generate a 576x320 video clip
- An optional second pass with [zeroscope_v2_XL](https://huggingface.co/cerspense/zeroscope_v2_XL) to upscale the video to 1024x576
👉 You will need to use the same prompt for both the generation and upscaling.
### Calling the video chain
To make a quick prototype, the WebTV runs Zeroscope from two duplicated Hugging Face Spaces running [Gradio](https://github.com/gradio-app/gradio/), which are called using the [@gradio/client](https://www.npmjs.com/package/@gradio/client) NPM package. You can find the original spaces here:
- [zeroscope-v2](https://huggingface.co/spaces/hysts/zeroscope-v2/tree/main) by @hysts
- [Zeroscope XL](https://huggingface.co/spaces/fffiloni/zeroscope-XL) by @fffiloni
Other spaces deployed by the community can also be found if you [search for Zeroscope on the Hub](https://huggingface.co/spaces?search=zeroscope).
👉 Public Spaces may become overcrowded and paused at any time. If you intend to deploy your own system, please duplicate those Spaces and run them under your own account.
### Using a model hosted on a Space
Spaces using Gradio have the ability to [expose a REST API](https://www.gradio.app/guides/sharing-your-app#api-page), which can then be called from Node using the [@gradio/client](https://www.npmjs.com/package/@gradio/client) module.
Here is an example:
```typescript
import { client } from "@gradio/client"

// URL of the Space hosting the text-to-video model
const instance = "*** URL OF THE SPACE ***"

export const generateVideo = async (prompt: string) => {
  const api = await client(instance)

  // call the "run()" function with an array of parameters
  const { data } = await api.predict("/run", [
    prompt,
    42, // seed
    24, // nbFrames
    35  // nbSteps
  ])

  const { orig_name } = data[0][0]

  // the generated file is served by the Space at this URL
  const remoteUrl = `${instance}/file=${orig_name}`

  // the file can then be downloaded and stored locally
}
```
### Post-processing
Once an individual take (a video clip) is upscaled, it is then passed to FILM (Frame Interpolation for Large Motion), a frame interpolation algorithm:
- Original links: [website](https://film-net.github.io/), [source code](https://github.com/google-research/frame-interpolation)
- Model on Hugging Face: [/frame-interpolation-film-style](https://huggingface.co/akhaliq/frame-interpolation-film-style)
- A Hugging Face Space you can duplicate: [video_frame_interpolation](https://huggingface.co/spaces/fffiloni/video_frame_interpolation/blob/main/app.py) by @fffiloni
During post-processing, we also add music generated with MusicGen:
- Original links: [website](https://ai.honu.io/papers/musicgen/), [source code](https://github.com/facebookresearch/audiocraft)
- Hugging Face Space you can duplicate: [MusicGen](https://huggingface.co/spaces/facebook/MusicGen)
### Broadcasting the stream
Note: there are multiple tools you can use to create a video stream. The AI WebTV currently uses [FFmpeg](https://ffmpeg.org/documentation.html) to read a playlist made of mp4 video files and m4a audio files.
Here is an example of creating such a playlist:
```typescript
import { promises as fs } from "fs"
import path from "path"

const dir = "** PATH TO VIDEO FOLDER **"
const allFiles = await fs.readdir(dir)

// keep only the mp4 files and turn them into full paths
const allVideoPaths = allFiles
  .map(file => path.join(dir, file))
  .filter(filePath => filePath.endsWith('.mp4'))

let playlist = 'ffconcat version 1.0\n'

allVideoPaths.forEach(filePath => {
  playlist += `file '${filePath}'\n`
})

await fs.writeFile("playlist.txt", playlist)
```
This will generate the following playlist content:
```bash
ffconcat version 1.0
file 'video1.mp4'
file 'video2.mp4'
...
```
FFmpeg is then used again to read this playlist and send a [FLV stream](https://en.wikipedia.org/wiki/Flash_Video) to a [RTMP server](https://en.wikipedia.org/wiki/Real-Time_Messaging_Protocol). FLV is an old format but still popular in the world of real-time streaming due to its low latency.
```bash
ffmpeg -y -nostdin \
-re \
-f concat \
-safe 0 -i channel_random.txt -stream_loop -1 \
-loglevel error \
-c:v libx264 -preset veryfast -tune zerolatency \
-shortest \
-f flv rtmp://<SERVER>
```
There are many different configuration options for FFmpeg; for more information, check the [official documentation](http://trac.ffmpeg.org/wiki/StreamingGuide).
For the RTMP server, you can find [open-source implementations on GitHub](https://github.com/topics/rtmp-server), such as the [NGINX-RTMP module](https://github.com/arut/nginx-rtmp-module).
The AI WebTV itself uses [node-media-server](https://github.com/illuspas/Node-Media-Server).
💡 You can also directly stream to [one of the Twitch RTMP entrypoints](https://help.twitch.tv/s/twitch-ingest-recommendation?language=en_US). Check out the Twitch documentation for more details.
## Observations and examples
Here are some examples of the generated content.
The first thing we notice is that applying the second pass of Zeroscope XL significantly improves the quality of the image. The impact of frame interpolation is also clearly visible.
### Characters and scene composition
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo4.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo4.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Photorealistic movie of a <strong>llama acting as a programmer, wearing glasses and a hoodie</strong>, intensely <strong>staring at a screen</strong> with lines of code, in a cozy, <strong>dimly lit room</strong>, Canon EOS, ambient lighting, high details, cinematic, trending on artstation</i></figcaption>
</figure>
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo5.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo5.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>3D rendered animation showing a group of food characters <strong>forming a pyramid</strong>, with a <strong>banana</strong> standing triumphantly on top. In a city with <strong>cotton candy clouds</strong> and <strong>chocolate road</strong>, Pixar's style, CGI, ambient lighting, direct sunlight, rich color scheme, ultra realistic, cinematic, photorealistic.</i></figcaption>
</figure>
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo7.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo7.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Intimate <strong>close-up of a red fox, gazing into the camera with sharp eyes</strong>, ambient lighting creating a high contrast silhouette, IMAX camera, <strong>high detail</strong>, <strong>cinematic effect</strong>, golden hour, film grain.</i></figcaption>
</figure>
### Simulation of dynamic scenes
Something truly fascinating about text-to-video models is their ability to emulate real-life phenomena they have been trained on.
We've seen it with large language models and their ability to synthesize convincing content that mimics human responses, but this takes things to a whole new dimension when applied to video.
A video model predicts the next frames of a scene, which might include objects in motion such as fluids, people, animals, or vehicles. Today, this emulation isn't perfect, but it will be interesting to evaluate future models (trained on larger or specialized datasets, such as animal locomotion) for their accuracy when reproducing physical phenomena, and also their ability to simulate the behavior of agents.
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo17.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo17.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Cinematic movie shot of <strong>bees energetically buzzing around a flower</strong>, sun rays illuminating the scene, captured in 4k IMAX with a soft bokeh background.</i></figcaption>
</figure>
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo8.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo8.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i><strong>Dynamic footage of a grizzly bear catching a salmon in a rushing river</strong>, ambient lighting highlighting the splashing water, low angle, IMAX camera, 4K movie quality, golden hour, film grain.</i></figcaption>
</figure>
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo18.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo18.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Aerial footage of a quiet morning at the coast of California, with <strong>waves gently crashing against the rocky shore</strong>. A startling sunrise illuminates the coast with vibrant colors, captured beautifully with a DJI Phantom 4 Pro. Colors and textures of the landscape come alive under the soft morning light. Film grain, cinematic, imax, movie</i></figcaption>
</figure>
💡 It will be interesting to see these capabilities explored more in the future, for instance by training video models on larger video datasets covering more phenomena.
### Styling and effects
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo0.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo0.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>
<strong>3D rendered video</strong> of a friendly broccoli character wearing a hat, walking in a candy-filled city street with gingerbread houses, under a <strong>bright sun and blue skies</strong>, <strong>Pixar's style</strong>, cinematic, photorealistic, movie, <strong>ambient lighting</strong>, natural lighting, <strong>CGI</strong>, wide-angle view, daytime, ultra realistic.</i>
</figcaption>
</figure>
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo2.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo2.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i><strong>Cinematic movie</strong>, shot of an astronaut and a llama at dawn, the mountain landscape bathed in <strong>soft muted colors</strong>, early morning fog, dew glistening on fur, craggy peaks, vintage NASA suit, Canon EOS, high detailed skin, epic composition, high quality, 4K, trending on artstation, beautiful</i>
</figcaption>
</figure>
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="demo1.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/demo1.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Panda and black cat <strong>navigating down the flowing river</strong> in a small boat, Studio Ghibli style > Cinematic, beautiful composition > IMAX <strong>camera panning following the boat</strong> > High quality, cinematic, movie, mist effect, film grain, trending on Artstation</i>
</figcaption>
</figure>
### Failure cases
**Wrong direction:** the model sometimes has trouble with movement and direction. For instance, here the clip seems to be played in reverse. Also the modifier keyword ***green*** was not taken into account.
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="fail1.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/fail1.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Movie showing a <strong>green pumpkin</strong> falling into a bed of nails, slow-mo explosion with chunks flying all over, ambient fog adding to the dramatic lighting, filmed with IMAX camera, 8k ultra high definition, high quality, trending on artstation.</i>
</figcaption>
</figure>
**Rendering errors on realistic scenes:** sometimes we can see artifacts such as moving vertical lines or waves. It is unclear what causes this, but it may be due to the combination of keywords used.
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="fail2.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/fail2.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Film shot of a captivating flight above the Grand Canyon, ledges and plateaus etched in orange and red. <strong>Deep shadows contrast</strong> with the fiery landscape under the midday sun, shot with DJI Phantom 4 Pro. The camera rotates to capture the vastness, <strong>textures</strong> and colors, in imax quality. Film <strong>grain</strong>, cinematic, movie.</i>
</figcaption>
</figure>
**Text or objects inserted into the image:** the model sometimes injects words from the prompt into the scene, such as "IMAX". Mentioning "Canon EOS" or "Drone footage" in the prompt can also make those objects appear in the video.
In the following example, we notice that the prompt word "llama" not only inserts a llama, but also two occurrences of the word "llama" written in flames.
<figure class="image flex flex-col items-center text-center m-0 w-full">
<video
alt="fail3.mp4"
autoplay loop autobuffer muted playsinline
>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/156_ai_webtv/fail3.mp4" type="video/mp4">
</video>
<figcaption>Prompt: <i>Movie scene of a <strong>llama</strong> acting as a firefighter, in firefighter uniform, dramatically spraying water at <strong>roaring flames</strong>, amidst a chaotic urban scene, Canon EOS, ambient lighting, high quality, award winning, highly detailed fur, cinematic, trending on artstation.</i>
</figcaption>
</figure>
## Recommendations
Here are some early recommendations that can be made from the previous observations:
### Using video-specific prompt keywords
You may already know that if you don’t prompt a specific aspect of the image with Stable Diffusion, things like the color of clothes or the time of the day might become random, or be assigned a generic value such as a neutral mid-day light.
The same is true for video models: you will want to be specific about things. Examples include camera and character movement, their orientation, speed and direction. You can leave it unspecified for creative purposes (idea generation), but this might not always give you the results you want (e.g., entities animated in reverse).
### Maintaining consistency between scenes
If you plan to create sequences of multiple videos, you will want to make sure you add as many details as possible in each prompt, otherwise you may lose important details from one sequence to another, such as the color.
💡 This will also improve the quality of the image since the prompt is used for the upscaling part with Zeroscope XL.
### Leverage frame interpolation
Frame interpolation is a powerful tool which can repair small rendering errors and turn many defects into features, especially in scenes with a lot of animation, or where a cartoon effect is acceptable. The [FILM algorithm](https://film-net.github.io/) will smoothen out elements of a frame with previous and following events in the video clip.
This works great to displace the background when the camera is panning or rotating, and will also give you creative freedom, such as control over the number of frames after the generation, to make slow-motion effects.
## Future work
We hope you enjoyed watching the AI WebTV stream and that it will inspire you to build more in this space.
As this was a first trial, a lot of things were not the focus of the tech demo: generating longer and more varied sequences, adding audio (sound effects, dialogue), generating and orchestrating complex scenarios, or letting a language model agent have more control over the pipeline.
Some of these ideas may make their way into future updates to the AI WebTV, but we also can’t wait to see what the community of researchers, engineers and builders will come up with!
| 1 |
0 | hf_public_repos | hf_public_repos/blog/convert-transformers-to-onnx.md | ---
title: 'Convert Transformers to ONNX with Hugging Face Optimum'
thumbnail: /blog/assets/81_convert_transformers_to_onnx/thumbnail.png
authors:
- user: philschmid
---
# Convert Transformers to ONNX with Hugging Face Optimum
Hundreds of Transformers experiments and models are uploaded to the [Hugging Face Hub](https://huggingface.co/) every single day. Machine learning engineers and students conducting those experiments use a variety of frameworks like PyTorch, TensorFlow/Keras, or others. These models are already used by thousands of companies and form the foundation of AI-powered products.
If you deploy Transformers models in production environments, we recommend exporting them first into a serialized format that can be loaded, optimized, and executed on specialized runtimes and hardware.
In this guide, you'll learn about:
1. [What is ONNX?](#1-what-is-onnx)
2. [What is Hugging Face Optimum?](#2-what-is-hugging-face-optimum)
3. [What Transformers architectures are supported?](#3-what-transformers-architectures-are-supported)
4. [How can I convert a Transformers model (BERT) to ONNX?](#4-how-can-i-convert-a-transformers-model-bert-to-onnx)
5. [What's next?](#5-whats-next)
Let's get started! 🚀
---
If you are interested in optimizing your models to run with maximum efficiency, check out the [🤗 Optimum library](https://github.com/huggingface/optimum).
<html itemscope itemtype="https://schema.org/FAQPage">
<div itemscope itemprop="mainEntity" itemtype="https://schema.org/Question">
<a id="1-what-is-onnx"><h2 itemprop="name"> 1. What is ONNX?</h2></a>
<div itemscope itemprop="acceptedAnswer" itemtype="https://schema.org/Answer">
<div itemprop="text">
The [ONNX or Open Neural Network eXchange](https://onnx.ai) is an open standard and format to represent machine learning models. ONNX defines a common set of operators and a common file format to represent deep learning models in a wide variety of frameworks, including PyTorch and TensorFlow.
<figure class="image table text-center m-0 w-full">
<img src="assets/81_convert_transformers_to_onnx/graph.png" alt="Netron ONNX Graph"/>
<figcaption>pseudo ONNX graph, visualized with NETRON</figcaption>
</figure>
When a model is exported to the ONNX format, these operators are used to construct a computational graph (often called an `intermediate representation`) which represents the flow of data through the neural network.
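As a quick illustration, once you have an exported `.onnx` file (for example, the one produced in section 4 below), you can inspect its graph with the `onnx` Python package — a minimal sketch:

```python
import onnx

# load the exported graph and list the first few operator types it contains
onnx_model = onnx.load("torch-model.onnx")
print([node.op_type for node in onnx_model.graph.node][:10])
```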
> **Important:** ONNX is not a runtime. ONNX is only the representation, which can be used with runtimes like ONNX Runtime. You can find a list of supported accelerators [here](https://onnx.ai/supported-tools.html#deployModel).
>
➡️[Learn more about ONNX.](https://onnx.ai/about.html)
</div>
</div>
</div>
<html itemscope itemtype="https://schema.org/FAQPage">
<div itemscope itemprop="mainEntity" itemtype="https://schema.org/Question">
<a id="2-what-is-hugging-face-optimum"><h2 itemprop="name"> 2. What is Hugging Face Optimum?</h2></a>
<div itemscope itemprop="acceptedAnswer" itemtype="https://schema.org/Answer">
<div itemprop="text">
[Hugging Face Optimum](https://github.com/huggingface/optimum) is an open-source library and an extension of [Hugging Face Transformers](https://github.com/huggingface/transformers), that provides a unified API of performance optimization tools to achieve maximum efficiency to train and run models on accelerated hardware, including toolkits for optimized performance on [Graphcore IPU](https://github.com/huggingface/optimum-graphcore) and [Habana Gaudi](https://github.com/huggingface/optimum-habana).
Optimum can be used for conversion, quantization, graph optimization, and accelerated training & inference, with support for [transformers pipelines](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#pipelines).
Below you can see a typical developer journey of how you can leverage Optimum with ONNX.
<figure class="image table text-center m-0 w-full">
<img src="assets/81_convert_transformers_to_onnx/user-journey.png" alt="developer journey optimum"/>
</figure>
[➡️ Learn more about Optimum](https://huggingface.co/blog/hardware-partners-program)
</div>
</div>
</div>
<html itemscope itemtype="https://schema.org/FAQPage">
<div itemscope itemprop="mainEntity" itemtype="https://schema.org/Question">
<a id="3-what-transformers-architectures-are-supported"><h2 itemprop="name"> 3. What Transformers architectures are supported?</h2></a>
<div itemscope itemprop="acceptedAnswer" itemtype="https://schema.org/Answer">
<div itemprop="text">
A list of all supported Transformers architectures can be found in the [ONNX section of the Transformers documentation](https://huggingface.co/docs/transformers/serialization#onnx). Below is an excerpt of the most commonly used architectures which can be converted to ONNX and optimized with [Hugging Face Optimum](https://huggingface.co/docs/optimum/index)
- ALBERT
- BART
- BERT
- DistilBERT
- ELECTRA
- GPT Neo
- GPT-J
- GPT-2
- RoBERTa
- T5
- ViT
- XLM
- …
[➡️ All supported architectures](https://huggingface.co/docs/transformers/serialization#onnx)
</div>
</div>
</div>
<html itemscope itemtype="https://schema.org/FAQPage">
<div itemscope itemprop="mainEntity" itemtype="https://schema.org/Question">
<a id="4-how-can-i-convert-a-transformers-model-bert-to-onnx"><h2 itemprop="name">4. How can I convert a Transformers model (BERT) to ONNX?</h2></a>
<div itemscope itemprop="acceptedAnswer" itemtype="https://schema.org/Answer">
<div itemprop="text">
There are currently three ways to convert your Hugging Face Transformers models to ONNX. In this section, you will learn how to export [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) for `text-classification` using all three methods going from the low-level `torch` API to the most user-friendly high-level API of `optimum`. Each method will do exactly the same thing.
### Export with `torch.onnx` (low-level)
[torch.onnx](https://pytorch.org/docs/stable/onnx.html) enables you to convert model checkpoints to an ONNX graph by the `export` method. But you have to provide a lot of values like `input_names`, `dynamic_axes`, etc.
You’ll first need to install some dependencies:
```python
pip install transformers torch
```
Exporting our checkpoint with `export`:
```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# load model and tokenizer
model_id = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
dummy_model_input = tokenizer("This is a sample", return_tensors="pt")
# export
torch.onnx.export(
model,
tuple(dummy_model_input.values()),
f="torch-model.onnx",
input_names=['input_ids', 'attention_mask'],
output_names=['logits'],
dynamic_axes={'input_ids': {0: 'batch_size', 1: 'sequence'},
'attention_mask': {0: 'batch_size', 1: 'sequence'},
'logits': {0: 'batch_size', 1: 'sequence'}},
do_constant_folding=True,
opset_version=13,
)
```
### Export with `transformers.onnx` (mid-level)
[transformers.onnx](https://huggingface.co/docs/transformers/serialization#exporting-a-model-to-onnx) enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects. That way you don’t have to provide the complex configuration for `dynamic_axes` etc.
You’ll first need to install some dependencies:
```python
pip install transformers[onnx] torch
```
Exporting our checkpoint with the `transformers.onnx` package:
```python
from pathlib import Path
import transformers
from transformers.onnx import FeaturesManager
from transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification
# load model and tokenizer
model_id = "distilbert-base-uncased-finetuned-sst-2-english"
feature = "sequence-classification"
model = AutoModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
# load config
model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=feature)
onnx_config = model_onnx_config(model.config)
# export
onnx_inputs, onnx_outputs = transformers.onnx.export(
preprocessor=tokenizer,
model=model,
config=onnx_config,
opset=13,
output=Path("trfs-model.onnx")
)
```
### Export with Optimum (high-level)
[Optimum](https://huggingface.co/docs/optimum/onnxruntime/modeling_ort#switching-from-transformers-to-optimum-inference) Inference includes methods to convert vanilla Transformers models to ONNX using the `ORTModelForXxx` classes. To convert your Transformers model to ONNX you simply have to pass `from_transformers=True` to the `from_pretrained()` method and your model will be loaded and converted to ONNX leveraging the [transformers.onnx](https://huggingface.co/docs/transformers/serialization#exporting-a-model-to-onnx) package under the hood.
You’ll first need to install some dependencies:
```python
pip install optimum[onnxruntime]
```
Exporting our checkpoint with `ORTModelForSequenceClassification`
```python
from optimum.onnxruntime import ORTModelForSequenceClassification
model = ORTModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english",from_transformers=True)
```
The best part about the conversion with Optimum is that you can immediately use the `model` to run predictions or load it [inside a pipeline.](https://huggingface.co/docs/optimum/onnxruntime/modeling_ort#switching-from-transformers-to-optimum-inference)
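For instance, a minimal sketch of plugging the converted model into a regular Transformers pipeline could look like this (the input sentence is just an example):

```python
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForSequenceClassification

model_id = "distilbert-base-uncased-finetuned-sst-2-english"
model = ORTModelForSequenceClassification.from_pretrained(model_id, from_transformers=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# the ONNX model is used like any other model inside a pipeline
onnx_classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(onnx_classifier("I love the new ONNX export workflow!"))
```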
</div>
</div>
</div>
## 5. What's next?
Now that you have successfully converted your Transformers model to ONNX, the whole set of optimization and quantization tools is open to you. Potential next steps can be:
- Use the onnx model for [Accelerated Inference with Optimum and Transformers Pipelines](https://huggingface.co/blog/optimum-inference)
- Apply [static quantization to your model](https://www.philschmid.de/static-quantization-optimum) for ~3x latency improvements
- Use ONNX runtime for [training](https://github.com/huggingface/optimum/tree/main/examples/onnxruntime/training)
- Convert your ONNX model to [TensorRT](https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/) to improve GPU performance
- …
If you are interested in optimizing your models to run with maximum efficiency, check out the [🤗 Optimum library](https://github.com/huggingface/optimum).
---
Thanks for reading! If you have any questions, feel free to contact me, through [Github](https://github.com/huggingface/transformers), or on the [forum](https://discuss.huggingface.co/c/optimum/59). You can also connect with me on [Twitter](https://twitter.com/_philschmid) or [LinkedIn](https://www.linkedin.com/in/philipp-schmid-a6a2bb196/).
</html>
| 2 |
0 | hf_public_repos | hf_public_repos/blog/cost-efficient-rag-applications-with-intel.md | ---
title: "Building Cost-Efficient Enterprise RAG applications with Intel Gaudi 2 and Intel Xeon"
thumbnail: /blog/assets/cost_efficient_rag_applications_with_intel/main.jpg
authors:
- user: juliensimon
- user: Haihao
guest: true
org: Intel
- user: antonyvance
guest: true
org: Intel
- user: MatrixYao
guest: true
org: Intel
- user: lianglv
guest: true
org: Intel
- user: Suleyman Sair
guest: true
org: Intel
- user: gserochi
guest: true
org: Intel
- user: Debbh
guest: true
org: Intel
- user: kding1
guest: true
org: Intel
---
# Building Cost-Efficient Enterprise RAG applications with Intel Gaudi 2 and Intel Xeon
<p align="center">
<img src="assets/cost_efficient_rag_applications_with_intel/main.jpg" width="512"><br>
</p>
Retrieval-augmented generation (RAG) enhances text generation with a large language model by incorporating fresh domain knowledge stored in an external datastore. Separating your company data from the knowledge learned by language models during training is essential to balance performance, accuracy, and security privacy goals.
In this blog, you will learn how Intel can help you develop and deploy RAG applications as part of [OPEA](https://opea.dev), the Open Platform for Enterprise AI. You will also discover how Intel Gaudi 2 AI accelerators and Xeon CPUs can significantly enhance enterprise performance through a real-world RAG use case.
# Getting started
Before diving into the details, let’s access the hardware first. [Intel Gaudi 2](https://habana.ai/products/gaudi2/) is purposely built to accelerate deep learning training and inference in the data center and cloud. It is publicly available on the [Intel Developer Cloud](https://www.intel.com/content/www/us/en/developer/tools/devcloud/overview.html) (IDC) and for on-premises implementations. IDC is the easiest way to start with Gaudi 2. If you don’t have an account yet, please register for one, subscribe to “Premium,” and then apply for access.
On the software side, we will build our application with LangChain, an open-source framework designed to simplify the creation of AI applications with LLMs. It provides template-based solutions, allowing developers to build RAG applications with custom embeddings, vector databases, and LLMs. The LangChain documentation provides more information. Intel has been actively contributing multiple optimizations to LangChain, enabling developers to deploy GenAI applications efficiently on Intel platforms.
In LangChain, we will use the `rag-redis` template to create our RAG application, with the [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) embedding model and Redis as the default vector database. The diagram below shows the high-level architecture.
<kbd>
<img src="assets/cost_efficient_rag_applications_with_intel/01.png">
</kbd>
The embedding model will run on an [Intel Granite Rapids](https://www.intel.com/content/www/us/en/newsroom/news/intel-unveils-future-generation-xeon.html#gs.6t3deu) CPU. The Intel Granite Rapids architecture is optimized to deliver the lowest total cost of ownership (TCO) for high-core performance-sensitive workloads and general-purpose compute workloads. GNR also supports the AMX-FP16 instruction set, resulting in a 2-3x performance increase for mixed AI workloads.
The LLM will run on an Intel Gaudi 2 accelerator. Regarding Hugging Face models, the [Optimum Habana](https://huggingface.co/docs/optimum/en/habana/index) library is the interface between the Hugging Face [Transformers](https://huggingface.co/docs/transformers/index) and [Diffusers](https://huggingface.co/docs/diffusers/index) libraries and Gaudi. It offers tools for easy model loading, training, and inference on single- and multi-card settings for various downstream tasks.
We provide a [Dockerfile](https://github.com/opea-project/GenAIExamples/tree/main/ChatQnA/langchain/docker) to streamline the setup of the LangChain development environment. Once you have launched the Docker container, you can start building the vector database, the RAG pipeline, and the LangChain application within the Docker environment. For a detailed step-by-step, follow the [ChatQnA](https://github.com/opea-project/GenAIExamples/tree/main/ChatQnA) example.
# Creating the Vector Database
To populate the vector database, we use public financial documents from Nike. Here is the sample code.
```
# Ingest PDF files that contain Edgar 10k filings data for Nike.
company_name = "Nike"
data_path = "data"
doc_path = [os.path.join(data_path, file) for file in os.listdir(data_path)][0]
content = pdf_loader(doc_path)
chunks = text_splitter.split_text(content)
# Create vectorstore
embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
_ = Redis.from_texts(
texts=[f"Company: {company_name}. " + chunk for chunk in chunks],
embedding=embedder,
index_name=INDEX_NAME,
index_schema=INDEX_SCHEMA,
redis_url=REDIS_URL,
)
```
# Defining the RAG Pipeline
In LangChain, we use the Chain API to connect the prompt, the vector database, and the embedding model.
The complete code is available in the [repository](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/langchain/redis/rag_redis/chain.py).
```
# Embedding model running on Xeon CPU
embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
# Redis vector database
vectorstore = Redis.from_existing_index(
embedding=embedder, index_name=INDEX_NAME, schema=INDEX_SCHEMA, redis_url=REDIS_URL
)
# Retriever
retriever = vectorstore.as_retriever(search_type="mmr")
# Prompt template
template = """…"""
prompt = ChatPromptTemplate.from_template(template)
# Hugging Face LLM running on Gaudi 2
model = HuggingFaceEndpoint(endpoint_url=TGI_LLM_ENDPOINT, …)
# RAG chain
chain = (
RunnableParallel({"context": retriever, "question": RunnablePassthrough()}) | prompt | model | StrOutputParser()
).with_types(input_type=Question)
```
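Once the chain is defined, you can sanity-check it directly from Python before wiring it to the serving layer. Here is a minimal sketch, assuming the Redis index built earlier and the TGI endpoint described in the next section are both running (the question is only an example):
```
# Quick local test of the RAG chain (example question for illustration)
answer = chain.invoke("What was Nike's revenue in its latest 10-K filing?")
print(answer)
```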
# Loading the LLM on Gaudi 2
We will run our chat model on Gaudi2 with the Hugging Face Text Generation Inference (TGI) server. This combination enables high-performance text generation for popular open-source LLMs on Gaudi2 hardware, such as MPT, Llama, and Mistral.
No setup is required. We can use a pre-built Docker image and pass the model name (e.g., Intel NeuralChat).
```
model=Intel/neural-chat-7b-v3-3
volume=$PWD/data
docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host tgi_gaudi --model-id $model
```
The service uses a single Gaudi accelerator by default. Multiple accelerators may be required to run a larger model (e.g., 70B). In that case, please add the appropriate parameters, e.g. `--sharded true` and `--num_shard 8`. For gated models such as [Llama](https://huggingface.co/meta-llama) or [StarCoder](https://huggingface.co/bigcode/starcoder), you will also need to specify `-e HUGGING_FACE_HUB_TOKEN=<token>` using your Hugging Face [token](https://huggingface.co/docs/hub/en/security-tokens).
Once the container runs, we check that the service works by sending a request to the TGI endpoint.
```
curl localhost:8080/generate -X POST \
-d '{"inputs":"Which NFL team won the Super Bowl in the 2010 season?", \
"parameters":{"max_new_tokens":128, "do_sample": true}}' \
-H 'Content-Type: application/json'
```
If you see a generated response, the LLM is running correctly and you can now enjoy high-performance inference on Gaudi 2!
The TGI Gaudi container utilizes the bfloat16 data type by default. For higher throughput, you may want to enable FP8 quantization. According to our test results, FP8 quantization should yield a 1.8x throughput gain compared to BF16. FP8 instructions are available in the [README](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/README.md) file.
Lastly, you can enable content moderation with the Meta [Llama Guard](https://huggingface.co/meta-llama/LlamaGuard-7b) model. The [README](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/README.md) file provides instructions for deploying Llama Guard on TGI Gaudi.
# Running the RAG service
We use the following instructions to launch the RAG application backend service. The `server.py` script defines the service endpoints using fastAPI.
```
docker exec -it qna-rag-redis-server bash
nohup python app/server.py &
```
By default, the TGI Gaudi endpoint is expected to run on localhost at port 8080 (i.e. `http://127.0.0.1:8080`). If it is running at a different address or port, please set the `TGI_ENDPOINT ` environment variable accordingly.
# Launching the RAG GUI
We use the instructions below to install the frontend GUI components.
```
sudo apt-get install npm && \
npm install -g n && \
n stable && \
hash -r && \
npm install -g npm@latest
```
Then, we update the `DOC_BASE_URL` environment variable in the `.env` file by replacing the localhost IP address (`127.0.0.1`) with the actual IP address of the server where the GUI runs.
We run the following command to install the required dependencies:
```
npm install
```
Finally, we start the GUI server with the following command:
```
nohup npm run dev &
```
This will run the frontend service and launch the application.
<kbd>
<img src="assets/cost_efficient_rag_applications_with_intel/02.png">
</kbd>
# Benchmark Results
We did intensive experiments with different models and configurations. The two figures below show the relative end-to-end throughput and performance per dollar comparison for the Llama2-70B model with 16 concurrent users on four Intel Gaudi 2 and four Nvidia H100 platforms.
<kbd>
<img src="assets/cost_efficient_rag_applications_with_intel/03.png">
</kbd>
<kbd>
<img src="assets/cost_efficient_rag_applications_with_intel/04.png">
</kbd>
In both cases, the same Intel Granite Rapids CPU platform is used for vector databases and embedding models. For performance per dollar comparison, we use publicly available pricing to compute an average training performance per dollar, the same as the one reported by the [MosaicML](https://www.databricks.com/blog/llm-training-and-inference-intel-gaudi2-ai-accelerators) team in January 2024.
As you can see, the H100-based system has 1.13x more throughput but can only deliver 0.44x performance per dollar compared to Gaudi 2. These comparisons may vary based on customer-specific discounts on different cloud providers. Detailed benchmark configurations are listed at the end of the post.
# Conclusion
The deployment example above successfully demonstrates a RAG-based chatbot on Intel platforms. Furthermore, as Intel keeps releasing ready-to-go GenAI examples, developers benefit from validated tools that simplify the creation and deployment process. These examples offer versatility and ease of customization, making them ideal for a wide range of applications on Intel platforms.
When running enterprise AI applications, the total cost of ownership is more favorable with systems based on Intel Granite Rapids CPUs and Gaudi 2 accelerators. Further improvements can be achieved with FP8 optimization.
The following developer resources should help you kickstart your GenAI projects confidently.
* [OPEA GenAI examples](https://github.com/opea-project/GenAIExamples)
* [Text Generation Inference on Gaudi 2](https://github.com/huggingface/tgi-gaudi)
* [Intel AIML Ecosystem: Hugging Face](https://www.intel.com/content/www/us/en/developer/ecosystem/hugging-face.html)
* [The Intel organization page on the Hugging Face hub](https://huggingface.co/Intel)
If you have questions or feedback, we'd love to answer them on the [Hugging Face forum](https://discuss.huggingface.co/). Thanks for reading!
**Acknowledgements**:
We want to thank Chaitanya Khened, Suyue Chen, Mikolaj Zyczynski, Wenjiao Yue, Wenxin Zhang, Letong Han, Sihan Chen, Hanwen Cheng, Yuan Wu, and Yi Wang for their outstanding contributions to building enterprise-grade RAG systems on Intel Gaudi 2.
---
**Benchmark configurations**
* Gaudi2 configurations: HLS-Gaudi2 with eight Habana Gaudi2 HL-225H Mezzanine cards and two Intel® Xeon® Platinum 8380 CPU @ 2.30GHz, and 1TB of System Memory; OS: Ubuntu 22.04.03, 5.15.0 kernel
* H100 SXM configurations: Lambda Labs instance gpu_8x_h100_sxm5; 8x H100 SXM and two Intel® Xeon® Platinum 8480 CPUs @ 2GHz, and 1.8TB of system memory; OS: Ubuntu 20.04.6 LTS, 5.15.0 kernel
* Intel Xeon: Pre-production Granite Rapids platform with 2Sx120C @ 1.9GHz and 8800 MCR DIMMs with 1.5TB system memory. OS: Cent OS 9, 6.2.0 kernel
* Llama2 70B is deployed to 4 cards (queries normalized to 8 cards). BF16 for Gaudi2 and FP16 for H100.
* Embedding model is BAAI/bge-base v1.5. Tested with: TGI-gaudi 1.2.1, TGI-GPU 1.4.5, Python 3.11.7, Langchain 0.1.11, sentence-transformers 2.5.1, langchain benchmarks 0.0.10, redis 5.0.2, cuda 12.2.r12.2/compiler.32965470 0, TEI 1.2.0
* RAG queries max input length 1024, max output length 128. Test dataset: langsmith Q&A. Number of concurrent clients 16
* TGI parameters for Gaudi2 (70B): `batch_bucket_size=22`, `prefill_batch_bucket_size=4`, `max_batch_prefill_tokens=5102`, `max_batch_total_tokens=32256`, `max_waiting_tokens=5`, `streaming=false`
* TGI parameters for H100 (70B): `batch_bucket_size=8`, `prefill_batch_bucket_size=4`, `max_batch_prefill_tokens=4096`, `max_batch_total_tokens=131072`, `max_waiting_tokens=20`, `max_batch_size=128`, `streaming=false`
* TCO Reference: [https://www.databricks.com/blog/llm-training-and-inference-intel-gaudi2-ai-accelerators](https://www.databricks.com/blog/llm-training-and-inference-intel-gaudi2-ai-accelerators)
| 3 |
0 | hf_public_repos | hf_public_repos/blog/audio-datasets.md | ---
title: "A Complete Guide to Audio Datasets"
thumbnail: /blog/assets/116_audio_datasets/thumbnail.jpg
authors:
- user: sanchit-gandhi
---
# A Complete Guide to Audio Datasets
<a target="_blank" href="https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/audio_datasets_colab.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
## Introduction
🤗 Datasets is an open-source library for downloading and preparing datasets from all domains. Its minimalistic API
allows users to download and prepare datasets in just one line of Python code, with a suite of functions that
enable efficient pre-processing. The number of datasets available is unparalleled, with all the most popular
machine learning datasets available to download.
Not only this, but 🤗 Datasets comes prepared with multiple audio-specific features that make working
with audio datasets easy for researchers and practitioners alike. In this blog, we'll demonstrate these features, showcasing
why 🤗 Datasets is the go-to place for downloading and preparing audio datasets.
## Contents
1. [The Hub](#the-hub)
2. [Load an Audio Dataset](#load-an-audio-dataset)
3. [Easy to Load, Easy to Process](#easy-to-load-easy-to-process)
4. [Streaming Mode: The Silver Bullet](#streaming-mode-the-silver-bullet)
5. [A Tour of Audio Datasets on the Hub](#a-tour-of-audio-datasets-on-the-hub)
6. [Closing Remarks](#closing-remarks)
## The Hub
The Hugging Face Hub is a platform for hosting models, datasets and demos, all open source and publicly available.
It is home to a growing collection of audio datasets that span a variety of domains, tasks and languages. Through
tight integrations with 🤗 Datasets, all the datasets on the Hub can be downloaded in one line of code.
Let's head to the Hub and filter the datasets by task:
* [Speech Recognition Datasets on the Hub](https://huggingface.co/datasets?task_categories=task_categories:automatic-speech-recognition&sort=downloads)
* [Audio Classification Datasets on the Hub](https://huggingface.co/datasets?task_categories=task_categories:audio-classification&sort=downloads)
<figure>
<img src="assets/116_audio_datasets/hub_asr_datasets.jpg" alt="Trulli" style="width:100%">
</figure>
At the time of writing, there are 77 speech recognition datasets and 28 audio classification datasets on the Hub,
with these numbers ever-increasing. You can select any one of these datasets to suit your needs. Let's check out the first
speech recognition result. Clicking on [`common_voice`](https://huggingface.co/datasets/common_voice)
brings up the dataset card:
<figure>
<img src="assets/116_audio_datasets/common_voice.jpg" alt="Trulli" style="width:100%">
</figure>
Here, we can find additional information about the dataset, see what models are trained on the dataset and, most
excitingly, listen to actual audio samples. The Dataset Preview is presented in the middle of the dataset card.
It shows us the first 100 samples for each subset and split. What's more, it's loaded up the audio samples ready for us
to listen to in real-time. If we hit the play button on the first sample, we can listen to the audio and see the
corresponding text.
The Dataset Preview is a brilliant way of experiencing audio datasets before committing to using them. You can pick any
dataset on the Hub, scroll through the samples and listen to the audio for the different subsets and splits, gauging whether
it's the right dataset for your needs. Once you've selected a dataset, it's trivial to load the data so that you can
start using it.
## Load an Audio Dataset
One of the key defining features of 🤗 Datasets is the ability to download and prepare a dataset in just one line of
Python code. This is made possible through the [`load_dataset`](https://huggingface.co/docs/datasets/loading#load)
function. Conventionally, loading a dataset involves: i) downloading the raw data, ii) extracting it from its
compressed format, and iii) preparing individual samples and splits. Using `load_dataset`, all of the heavy lifting is done
under the hood.
Let's take the example of loading the [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) dataset from
Speech Colab. GigaSpeech is a relatively recent speech recognition dataset for benchmarking academic speech systems and is
one of many audio datasets available on the Hugging Face Hub.
To load the GigaSpeech dataset, we simply take the dataset's identifier on the Hub (`speechcolab/gigaspeech`) and specify it
to the [`load_dataset`](https://huggingface.co/docs/datasets/loading#load) function. GigaSpeech comes in five configurations
of increasing size, ranging from `xs` (10 hours) to `xl` (10,000 hours). For the purpose of this tutorial, we'll load the smallest
of these configurations. The dataset's identifier and the desired configuration are all that we require to download the dataset:
```python
from datasets import load_dataset
gigaspeech = load_dataset("speechcolab/gigaspeech", "xs")
print(gigaspeech)
```
**Print Output:**
```python
DatasetDict({
train: Dataset({
features: ['segment_id', 'speaker', 'text', 'audio', 'begin_time', 'end_time', 'audio_id', 'title', 'url', 'source', 'category', 'original_full_path'],
num_rows: 9389
})
validation: Dataset({
features: ['segment_id', 'speaker', 'text', 'audio', 'begin_time', 'end_time', 'audio_id', 'title', 'url', 'source', 'category', 'original_full_path'],
num_rows: 6750
})
test: Dataset({
features: ['segment_id', 'speaker', 'text', 'audio', 'begin_time', 'end_time', 'audio_id', 'title', 'url', 'source', 'category', 'original_full_path'],
num_rows: 25619
})
})
```
And just like that, we have the GigaSpeech dataset ready! There simply is no easier way of loading an audio dataset. We
can see that we have the training, validation and test splits pre-partitioned, with the corresponding information for
each.
The object `gigaspeech` returned by the `load_dataset` function is a [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.DatasetDict).
We can treat it in much the same way as an ordinary Python dictionary. To get the train split, we pass the corresponding
key to the `gigaspeech` dictionary:
```python
print(gigaspeech["train"])
```
**Print Output:**
```python
Dataset({
features: ['segment_id', 'speaker', 'text', 'audio', 'begin_time', 'end_time', 'audio_id', 'title', 'url', 'source', 'category', 'original_full_path'],
num_rows: 9389
})
```
This returns a [`Dataset`](https://huggingface.co/docs/datasets/v2.7.1/en/package_reference/main_classes#datasets.Dataset)
object, which contains the data for the training split. We can go one level deeper and get the first item of the split.
Again, this is possible through standard Python indexing:
```python
print(gigaspeech["train"][0])
```
**Print Output:**
```python
{'segment_id': 'YOU0000000315_S0000660',
'speaker': 'N/A',
'text': "AS THEY'RE LEAVING <COMMA> CAN KASH PULL ZAHRA ASIDE REALLY QUICKLY <QUESTIONMARK>",
'audio': {'path': '/home/sanchit_huggingface_co/.cache/huggingface/datasets/downloads/extracted/7f8541f130925e9b2af7d37256f2f61f9d6ff21bf4a94f7c1a3803ec648d7d79/xs_chunks_0000/YOU0000000315_S0000660.wav',
'array': array([0.0005188 , 0.00085449, 0.00012207, ..., 0.00125122, 0.00076294,
0.00036621], dtype=float32),
'sampling_rate': 16000
},
'begin_time': 2941.889892578125,
'end_time': 2945.070068359375,
'audio_id': 'YOU0000000315',
'title': 'Return to Vasselheim | Critical Role: VOX MACHINA | Episode 43',
'url': 'https://www.youtube.com/watch?v=zr2n1fLVasU',
'source': 2,
'category': 24,
'original_full_path': 'audio/youtube/P0004/YOU0000000315.opus',
}
```
We can see that there are a number of features returned by the training split, including `segment_id`, `speaker`, `text`,
`audio` and more. For speech recognition, we'll be concerned with the `text` and `audio` columns.
Using 🤗 Datasets' [`remove_columns`](https://huggingface.co/docs/datasets/process#remove) method, we can remove the
dataset features not required for speech recognition:
```python
COLUMNS_TO_KEEP = ["text", "audio"]
all_columns = gigaspeech["train"].column_names
columns_to_remove = set(all_columns) - set(COLUMNS_TO_KEEP)
gigaspeech = gigaspeech.remove_columns(columns_to_remove)
```
Let's check that we've successfully retained the `text` and `audio` columns:
```python
print(gigaspeech["train"][0])
```
**Print Output:**
```python
{'text': "AS THEY'RE LEAVING <COMMA> CAN KASH PULL ZAHRA ASIDE REALLY QUICKLY <QUESTIONMARK>",
'audio': {'path': '/home/sanchit_huggingface_co/.cache/huggingface/datasets/downloads/extracted/7f8541f130925e9b2af7d37256f2f61f9d6ff21bf4a94f7c1a3803ec648d7d79/xs_chunks_0000/YOU0000000315_S0000660.wav',
'array': array([0.0005188 , 0.00085449, 0.00012207, ..., 0.00125122, 0.00076294,
0.00036621], dtype=float32),
'sampling_rate': 16000}}
```
Great! We can see that we've got the two required columns `text` and `audio`. The `text` is a string with the sample
transcription and the `audio` is a 1-dimensional array of amplitude values at a sampling rate of 16kHz. That's our
dataset loaded!
## Easy to Load, Easy to Process
Loading a dataset with 🤗 Datasets is just half of the fun. We can now use the suite of tools available to efficiently
pre-process our data ready for model training or inference. In this Section, we'll perform three stages of data
pre-processing:
1. [Resampling the Audio Data](#1-resampling-the-audio-data)
2. [Pre-Processing Function](#2-pre-processing-function)
3. [Filtering Function](#3-filtering-function)
### 1. Resampling the Audio Data
The `load_dataset` function prepares audio samples with the sampling rate that they were published with. This is not
always the sampling rate expected by our model. In this case, we need to _resample_ the audio to the correct sampling
rate.
We can set the audio inputs to our desired sampling rate using 🤗 Datasets'
[`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=cast_column#datasets.DatasetDict.cast_column)
method. This operation does not change the audio in-place, but rather signals to `datasets` to resample the audio samples
_on the fly_ when they are loaded. The following code cell will set the sampling rate to 8kHz:
```python
from datasets import Audio
gigaspeech = gigaspeech.cast_column("audio", Audio(sampling_rate=8000))
```
Re-loading the first audio sample in the GigaSpeech dataset will resample it to the desired sampling rate:
```python
print(gigaspeech["train"][0])
```
**Print Output:**
```python
{'text': "AS THEY'RE LEAVING <COMMA> CAN KASH PULL ZAHRA ASIDE REALLY QUICKLY <QUESTIONMARK>",
'audio': {'path': '/home/sanchit_huggingface_co/.cache/huggingface/datasets/downloads/extracted/7f8541f130925e9b2af7d37256f2f61f9d6ff21bf4a94f7c1a3803ec648d7d79/xs_chunks_0000/YOU0000000315_S0000660.wav',
'array': array([ 0.00046338, 0.00034808, -0.00086153, ..., 0.00099299,
0.00083484, 0.00080221], dtype=float32),
'sampling_rate': 8000}
}
```
We can see that the sampling rate has been downsampled to 8kHz. The array values are also different, as we've now only
got approximately one amplitude value for every two that we had before. Let's set the dataset sampling rate back to
16kHz, the sampling rate expected by most speech recognition models:
```python
gigaspeech = gigaspeech.cast_column("audio", Audio(sampling_rate=16000))
print(gigaspeech["train"][0])
```
**Print Output:**
```python
{'text': "AS THEY'RE LEAVING <COMMA> CAN KASH PULL ZAHRA ASIDE REALLY QUICKLY <QUESTIONMARK>",
'audio': {'path': '/home/sanchit_huggingface_co/.cache/huggingface/datasets/downloads/extracted/7f8541f130925e9b2af7d37256f2f61f9d6ff21bf4a94f7c1a3803ec648d7d79/xs_chunks_0000/YOU0000000315_S0000660.wav',
'array': array([0.0005188 , 0.00085449, 0.00012207, ..., 0.00125122, 0.00076294,
0.00036621], dtype=float32),
'sampling_rate': 16000}
}
```
Easy! `cast_column` provides a straightforward mechanism for resampling audio datasets as and when required.
### 2. Pre-Processing Function
One of the most challenging aspects of working with audio datasets is preparing the data in the right format for our
model. Using 🤗 Datasets' [`map`](https://huggingface.co/docs/datasets/v2.6.1/en/process#map) method, we can write a
function to pre-process a single sample of the dataset, and then apply it to every sample without any code changes.
First, let's load a processor object from 🤗 Transformers. This processor pre-processes the audio to input features and
tokenises the target text to labels. The `AutoProcessor` class is used to load a processor from a given model checkpoint.
In the example, we load the processor from OpenAI's [Whisper medium.en](https://huggingface.co/openai/whisper-medium.en)
checkpoint, but you can change this to any model identifier on the Hugging Face Hub:
```python
from transformers import AutoProcessor
processor = AutoProcessor.from_pretrained("openai/whisper-medium.en")
```
Great! Now we can write a function that takes a single training sample and passes it through the `processor` to prepare
it for our model. We'll also compute the input length of each audio sample, information that we'll need for the next
data preparation step:
```python
def prepare_dataset(batch):
audio = batch["audio"]
batch = processor(audio["array"], sampling_rate=audio["sampling_rate"], text=batch["text"])
batch["input_length"] = len(audio["array"]) / audio["sampling_rate"]
return batch
```
We can apply the data preparation function to all of our training examples using 🤗 Datasets' `map` method. Here, we also
remove the `text` and `audio` columns, since we have pre-processed the audio to input features and tokenised the text to
labels:
```python
gigaspeech = gigaspeech.map(prepare_dataset, remove_columns=gigaspeech["train"].column_names)
```
### 3. Filtering Function
Prior to training, we might have a heuristic for filtering our training data. For instance, we might want to filter any
audio samples longer than 30s to prevent truncating the audio samples or risking out-of-memory errors. We can do this in
much the same way that we prepared the data for our model in the previous step.
We start by writing a function that indicates which samples to keep and which to discard. This
function, `is_audio_length_in_range`, returns a boolean: samples that are shorter than 30s return True, and those
that are longer False.
```python
MAX_DURATION_IN_SECONDS = 30.0
def is_audio_length_in_range(input_length):
return input_length < MAX_DURATION_IN_SECONDS
```
We can apply this filtering function to all of our training examples using 🤗 Datasets' [`filter`](https://huggingface.co/docs/datasets/process#select-and-filter)
method, keeping all samples that are shorter than 30s (True) and discarding those that are longer (False):
```python
gigaspeech["train"] = gigaspeech["train"].filter(is_audio_length_in_range, input_columns=["input_length"])
```
And with that, we have the GigaSpeech dataset fully prepared for our model! In total, this process required 13 lines of
Python code, right from loading the dataset to the final filtering step.
Keeping the notebook as general as possible, we only performed the fundamental data preparation steps. However, there
is no restriction to the functions you can apply to your audio dataset. You can extend the function `prepare_dataset`
to perform much more involved operations, such as data augmentation, voice activity detection or noise reduction. With
🤗 Datasets, if you can write it in a Python function, you can apply it to your dataset!
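As a minimal sketch of such an extension, the function below adds a simple white-noise augmentation step before calling the `processor` loaded earlier; the `noise_factor` value is an arbitrary illustrative choice, not a recommendation.
```python
import numpy as np

def prepare_dataset_with_augmentation(batch, noise_factor=0.005):
    audio = batch["audio"]
    array = np.asarray(audio["array"], dtype=np.float32)
    # simple augmentation: add a small amount of Gaussian noise to the waveform
    array = array + noise_factor * np.random.randn(len(array)).astype(np.float32)
    batch = processor(array, sampling_rate=audio["sampling_rate"], text=batch["text"])
    batch["input_length"] = len(array) / audio["sampling_rate"]
    return batch
```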
## Streaming Mode: The Silver Bullet
One of the biggest challenges faced with audio datasets is their sheer size. The `xs` configuration of GigaSpeech contained just 10
hours of training data, but required over 13GB of storage space for download and preparation. So what happens when we
want to train on a larger split? The full `xl` configuration contains 10,000 hours of training data, requiring over 1TB of
storage space. For most speech researchers, this well exceeds the capacity of a typical hard disk drive.
Do we need to fork out and buy additional storage? Or is there a way we can train on these datasets with **no disk space
constraints**?
🤗 Datasets allow us to do just this. It is made possible through the use of [_streaming_](https://huggingface.co/docs/datasets/stream)
mode, depicted graphically in Figure 1. Streaming allows us to load the data progressively as we iterate over the dataset.
Rather than downloading the whole dataset at once, we load the dataset sample by sample. We iterate over the dataset,
loading and preparing samples _on the fly_ when they are needed. This way, we only ever load the samples that we're using,
and not the ones that we're not! Once we're done with a sample, we continue iterating over the dataset and load the next one.
This is analogous to _downloading_ a TV show versus _streaming_ it. When we download a TV show, we download the entire video
offline and save it to our disk. We have to wait for the entire video to download before we can watch it and require as
much disk space as the size of the video file. Compare this to streaming a TV show. Here, we don’t download any part of the
video to disk, but rather iterate over the remote video file and load each part in real-time as required. We don't have
to wait for the full video to buffer before we can start watching; we can start as soon as the first portion of the video
is ready! This is the same _streaming_ principle that we apply to loading datasets.
<figure>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/streaming.gif" alt="Trulli" style="width:100%">
<figcaption align = "center"><b>Figure 1:</b> Streaming mode. The dataset is loaded progressively as we iterate over the dataset.</figcaption>
</figure>
Streaming mode has three primary advantages over downloading the entire dataset at once:
1. **Disk space:** samples are loaded to memory one-by-one as we iterate over the dataset. Since the data is not downloaded locally, there are no disk space requirements, so you can use datasets of arbitrary size.
2. **Download and processing time:** audio datasets are large and need a significant amount of time to download and process. With streaming, loading and processing is done on the fly, meaning you can start using the dataset as soon as the first sample is ready.
3. **Easy experimentation:** you can experiment on a handful of samples to check that your script works without having to download the entire dataset.
There is one caveat to streaming mode. When downloading a dataset, both the raw data and processed data are saved locally
to disk. If we want to re-use this dataset, we can directly load the processed data from disk, skipping the download and
processing steps. Consequently, we only have to perform the downloading and processing operations once, after which we
can re-use the prepared data. With streaming mode, the data is not downloaded to disk. Thus, neither the downloaded nor
pre-processed data are cached. If we want to re-use the dataset, the streaming steps must be repeated, with the audio
files loaded and processed on the fly again. For this reason, it is advised to download datasets that you are likely to use
multiple times.
How can you enable streaming mode? Easy! Just set `streaming=True` when you load your dataset. The rest will be taken
care of for you:
```python
gigaspeech = load_dataset("speechcolab/gigaspeech", "xs", streaming=True)
```
All the steps covered so far in this tutorial can be applied to the streaming dataset without any code changes.
The only change is that you can no longer access individual samples using Python indexing (i.e. `gigaspeech["train"][sample_idx]`).
Instead, you have to iterate over the dataset, using a `for` loop for example.
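For instance, the first sample of the streamed training split can be fetched by iterating, and the `take` method (assuming a recent version of 🤗 Datasets) lets you peek at a handful of samples:
```python
# indexing is not available in streaming mode, so we iterate instead
first_sample = next(iter(gigaspeech["train"]))
print(first_sample["text"])

# peek at a few samples without downloading the full dataset
for sample in gigaspeech["train"].take(5):
    print(sample["text"])
```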
Streaming mode can take your research to the next level: not only are the biggest datasets accessible to you, but you
can easily evaluate systems over multiple datasets in one go without worrying about your disk space. Compared
to evaluating on a single dataset, multi-dataset evaluation gives a better metric for the generalisation
abilities of a speech recognition system (_c.f._ [End-to-end Speech Benchmark (ESB)](https://arxiv.org/abs/2210.13352)).
The accompanying [Google Colab](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/audio_datasets_colab.ipynb)
provides an example for evaluating the Whisper model on eight English speech recognition datasets in one script using
streaming mode.
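A minimal sketch of the idea is shown below: several test sets are opened in streaming mode and a couple of samples are pulled from each, ready to be passed to a speech recognition system. The split names are assumptions based on the respective dataset cards, and GigaSpeech still requires accepting its terms of use; the full evaluation loop is in the Colab.
```python
from datasets import load_dataset

test_sets = {
    "librispeech-clean": load_dataset("librispeech_asr", "all", split="test.clean", streaming=True),
    "gigaspeech": load_dataset("speechcolab/gigaspeech", "xs", split="test", streaming=True, use_auth_token=True),
}

# a real run would pass sample["audio"] through an ASR model and score it against sample["text"]
for name, dataset in test_sets.items():
    for sample in dataset.take(2):
        print(name, "|", sample["text"])
```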
## A Tour of Audio Datasets on the Hub
This Section serves as a reference guide for the most popular speech recognition, speech translation and audio
classification datasets on the Hugging Face Hub. We can apply everything that we've covered for the GigaSpeech dataset
to any of the datasets on the Hub. All we have to do is switch the dataset identifier in the `load_dataset` function.
It's that easy!
1. [English Speech Recognition](#english-speech-recognition)
2. [Multilingual Speech Recognition](#multilingual-speech-recognition)
3. [Speech Translation](#speech-translation)
4. [Audio Classification](#audio-classification)
### English Speech Recognition
Speech recognition, or speech-to-text, is the task of mapping from spoken speech to written text, where both the speech
and text are in the same language. We provide a summary of the most popular English speech recognition datasets on the Hub:
| Dataset | Domain | Speaking Style | Train Hours | Casing | Punctuation | License | Recommended Use |
|-----------------------------------------------------------------------------------------|-----------------------------|-----------------------|-------------|--------|-------------|-----------------|----------------------------------|
| [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) | Audiobook | Narrated | 960 | ❌ | ❌ | CC-BY-4.0 | Academic benchmarks |
| [Common Voice 11](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) | Wikipedia | Narrated | 2300 | ✅ | ✅ | CC0-1.0 | Non-native speakers |
| [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) | European Parliament | Oratory | 540 | ❌ | ✅ | CC0 | Non-native speakers |
| [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium) | TED talks | Oratory | 450 | ❌ | ❌ | CC-BY-NC-ND 3.0 | Technical topics |
| [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech) | Audiobook, podcast, YouTube | Narrated, spontaneous | 10000 | ❌ | ✅ | apache-2.0 | Robustness over multiple domains |
| [SPGISpeech](https://huggingface.co/datasets/kensho/spgispeech)                          | Financial meetings          | Oratory, spontaneous  | 5000        | ✅      | ✅           | User Agreement  | Fully formatted transcriptions   |
| [Earnings-22](https://huggingface.co/datasets/revdotcom/earnings22)                      | Financial meetings          | Oratory, spontaneous  | 119         | ✅      | ✅           | CC-BY-SA-4.0    | Diversity of accents             |
| [AMI](https://huggingface.co/datasets/edinburghcstr/ami) | Meetings | Spontaneous | 100 | ✅ | ✅ | CC-BY-4.0 | Noisy speech conditions |
Refer to the [Google Colab](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/audio_datasets_colab.ipynb)
for a guide on evaluating a system on all eight English speech recognition datasets in one script.
The following dataset descriptions are largely taken from the [ESB Benchmark](https://arxiv.org/abs/2210.13352) paper.
#### [LibriSpeech ASR](https://huggingface.co/datasets/librispeech_asr)
LibriSpeech is a standard large-scale dataset for evaluating ASR systems. It consists of approximately 1,000
hours of narrated audiobooks collected from the [LibriVox](https://librivox.org/) project. LibriSpeech has been
instrumental in enabling researchers to leverage a large body of pre-existing transcribed speech data. As such, it
has become one of the most popular datasets for benchmarking academic speech systems.
```python
librispeech = load_dataset("librispeech_asr", "all")
```
#### [Common Voice](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0)
Common Voice is a series of crowd-sourced open-licensed speech datasets where speakers record text from Wikipedia in
various languages. Since anyone can contribute recordings, there is significant variation in both audio quality and
speakers. The audio conditions are challenging, with recording artefacts, accented speech, hesitations, and the presence
of foreign words. The transcriptions are both cased and punctuated. The English subset of version 11.0 contains
approximately 2,300 hours of validated data. Use of the dataset requires you to agree to the Common Voice terms of use,
which can be found on the Hugging Face Hub: [mozilla-foundation/common_voice_11_0](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0).
Once you have agreed to the terms of use, you will be granted access to the dataset. You will then need to provide an
[authentication token](https://huggingface.co/settings/tokens) from the Hub when you load the dataset.
```python
common_voice = load_dataset("mozilla-foundation/common_voice_11_0", "en", use_auth_token=True)
```
#### [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli)
VoxPopuli is a large-scale multilingual speech corpus consisting of data sourced from 2009-2020 European Parliament
event recordings. Consequently, it occupies the unique domain of oratory, political speech, largely sourced from
non-native speakers. The English subset contains approximately 550 hours of labelled speech.
```python
voxpopuli = load_dataset("facebook/voxpopuli", "en")
```
#### [TED-LIUM](https://huggingface.co/datasets/LIUM/tedlium)
TED-LIUM is a dataset based on English-language TED Talk conference videos. The speaking style is oratory educational
talks. The transcribed talks cover a range of different cultural, political, and academic topics, resulting in a
technical vocabulary. The Release 3 (latest) edition of the dataset contains approximately 450 hours of training data.
The validation and test data are from the legacy set, consistent with earlier releases.
```python
tedlium = load_dataset("LIUM/tedlium", "release3")
```
#### [GigaSpeech](https://huggingface.co/datasets/speechcolab/gigaspeech)
GigaSpeech is a multi-domain English speech recognition corpus curated from audiobooks, podcasts and YouTube. It covers
both narrated and spontaneous speech over a variety of topics, such as arts, science and sports. It contains training
splits ranging from 10 hours to 10,000 hours and standardised validation and test splits.
```python
gigaspeech = load_dataset("speechcolab/gigaspeech", "xs", use_auth_token=True)
```
#### [SPGISpeech](https://huggingface.co/datasets/kensho/spgispeech)
SPGISpeech is an English speech recognition corpus composed of company earnings calls that have been manually
transcribed by S&P Global, Inc. The transcriptions are fully-formatted according to a professional style guide for
oratory and spontaneous speech. It contains training splits ranging from 200 hours to 5,000 hours, with canonical
validation and test splits.
```python
spgispeech = load_dataset("kensho/spgispeech", "s", use_auth_token=True)
```
#### [Earnings-22](https://huggingface.co/datasets/revdotcom/earnings22)
Earnings-22 is a 119-hour corpus of English-language earnings calls collected from global companies. The dataset was
developed with the goal of aggregating a broad range of speakers and accents covering a range of real-world financial
topics. There is large diversity in the speakers and accents, with speakers taken from seven different language regions.
Earnings-22 was published primarily as a test-only dataset. The Hub contains a version of the dataset that has been
partitioned into train-validation-test splits.
```python
earnings22 = load_dataset("revdotcom/earnings22")
```
#### [AMI](https://huggingface.co/datasets/edinburghcstr/ami)
AMI comprises 100 hours of meeting recordings captured using different recording streams. The corpus contains manually
annotated orthographic transcriptions of the meetings aligned at the word level. Individual samples of the AMI dataset
contain very large audio files (between 10 and 60 minutes), which are segmented to lengths feasible for training most
speech recognition systems. AMI contains two splits: IHM and SDM. IHM (individual headset microphone) contains easier
near-field speech, and SDM (single distant microphone) harder far-field speech.
```python
ami = load_dataset("edinburghcstr/ami", "ihm")
```
### Multilingual Speech Recognition
Multilingual speech recognition refers to speech recognition (speech-to-text) for all languages except English.
#### [Multilingual LibriSpeech](https://huggingface.co/datasets/facebook/multilingual_librispeech)
Multilingual LibriSpeech is the multilingual equivalent of the [LibriSpeech ASR](https://huggingface.co/datasets/librispeech_asr) corpus.
It comprises a large corpus of read audiobooks taken from the [LibriVox](https://librivox.org/) project, making
it a suitable dataset for academic research. It contains data split into eight high-resource languages - English,
German, Dutch, Spanish, French, Italian, Portuguese and Polish.
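As with the English datasets, it can be loaded by passing the identifier and a language configuration to `load_dataset`; the configuration name below (`german`) is an assumption based on the dataset card.
```python
mls_german = load_dataset("facebook/multilingual_librispeech", "german")
```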
#### [Common Voice](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0)
Common Voice is a series of crowd-sourced open-licensed speech datasets where speakers record text from Wikipedia in
various languages. Since anyone can contribute recordings, there is significant variation in both audio quality and
speakers. The audio conditions are challenging, with recording artefacts, accented speech, hesitations, and the presence
of foreign words. The transcriptions are both cased and punctuated. As of version 11, there are over 100 languages
available, both low and high-resource.
#### [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli)
VoxPopuli is a large-scale multilingual speech corpus consisting of data sourced from 2009-2020 European Parliament
event recordings. Consequently, it occupies the unique domain of oratory, political speech, largely sourced from
non-native speakers. It contains labelled audio-transcription data for 15 European languages.
#### [FLEURS](https://huggingface.co/datasets/google/fleurs)
FLEURS (Few-shot Learning Evaluation of Universal Representations of Speech) is a dataset for evaluating speech recognition
systems in 102 languages, including many that are classified as 'low-resource'. The data is derived from the [FLoRes-101](https://arxiv.org/abs/2106.03193)
dataset, a machine translation corpus with 3001 sentence translations from English to 101 other languages. Native speakers
are recorded narrating the sentence transcriptions in their native language. The recorded audio data is paired with the
sentence transcriptions to yield multilingual speech recognition over all 101 languages. The training sets contain
approximately 10 hours of supervised audio-transcription data per language.
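FLEURS can be loaded in the same one-line fashion by specifying a language configuration. The configuration names below are assumptions based on the dataset card, which uses language-locale codes alongside an `all` option:
```python
fleurs_french = load_dataset("google/fleurs", "fr_fr")

# or load all languages at once
fleurs_all = load_dataset("google/fleurs", "all")
```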
### Speech Translation
Speech translation is the task of mapping from spoken speech to written text, where the speech and text are in different
languages (e.g. English speech to French text).
#### [CoVoST 2](https://huggingface.co/datasets/covost2)
CoVoST 2 is a large-scale multilingual speech translation corpus covering translations from 21 languages into English
and from English into 15 languages. The dataset is created using Mozilla's open-source Common Voice database of
crowd-sourced voice recordings. There are 2,900 hours of speech represented in the corpus.
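A hedged loading example is shown below for the English-to-German direction. The configuration name is an assumption based on the dataset card, and at the time of writing the loading script may require the corresponding Common Voice audio to be downloaded separately and passed via `data_dir` (the path below is a placeholder); check the dataset card for the exact requirements.
```python
# English speech -> German text; data_dir is a placeholder for the locally downloaded Common Voice audio
covost_en_de = load_dataset("covost2", "en_de", data_dir="path/to/common_voice")
```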
#### [FLEURS](https://huggingface.co/datasets/google/fleurs)
FLEURS (Few-shot Learning Evaluation of Universal Representations of Speech) is a dataset for evaluating speech recognition
systems in 102 languages, including many that are classified as 'low-resource'. The data is derived from the [FLoRes-101](https://arxiv.org/abs/2106.03193)
dataset, a machine translation corpus with 3001 sentence translations from English to 101 other languages. Native
speakers are recorded narrating the sentence transcriptions in their native languages. An \\(n\\)-way parallel corpus of
speech translation data is constructed by pairing the recorded audio data with the sentence transcriptions for each of the
101 languages. The training sets contain approximately 10 hours of supervised audio-transcription data per source-target
language combination.
### Audio Classification
Audio classification is the task of mapping a raw audio input to a class label output. Practical applications of audio
classification include keyword spotting, speaker intent and language identification.
#### [SpeechCommands](https://huggingface.co/datasets/speech_commands)
SpeechCommands is a dataset comprised of one-second audio files, each containing either a single spoken word in English
or background noise. The words are taken from a small set of commands and are spoken by a number of different speakers.
The dataset is designed to help train and evaluate small on-device keyword spotting systems.
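It can be loaded by specifying one of its versioned configurations; `v0.02` below is an assumption based on the dataset card.
```python
speech_commands = load_dataset("speech_commands", "v0.02")
```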
#### [Multilingual Spoken Words](https://huggingface.co/datasets/MLCommons/ml_spoken_words)
Multilingual Spoken Words is a large-scale corpus of one-second audio samples, each containing a single spoken word. The
dataset consists of 50 languages and more than 340,000 keywords, totalling 23.4 million one-second spoken examples or
over 6,000 hours of audio. The audio-transcription data is sourced from the Mozilla Common Voice project. Time stamps
are generated for every utterance on the word-level and used to extract individual spoken words and their corresponding
transcriptions, thus forming a new corpus of single spoken words. The dataset's intended use is academic research and
commercial applications in multilingual keyword spotting and spoken term search.
#### [FLEURS](https://huggingface.co/datasets/google/fleurs)
FLEURS (Few-shot Learning Evaluation of Universal Representations of Speech) is a dataset for evaluating speech recognition
systems in 102 languages, including many that are classified as 'low-resource'. The data is derived from the [FLoRes-101](https://arxiv.org/abs/2106.03193)
dataset, a machine translation corpus with 3001 sentence translations from English to 101 other languages. Native
speakers are recorded narrating the sentence transcriptions in their native languages. The recorded audio data is paired
with a label for the language in which it is spoken. The dataset can be used as an audio classification dataset for
_language identification_: systems are trained to predict the language of each utterance in the corpus.
## Closing Remarks
In this blog post, we explored the Hugging Face Hub and experienced the Dataset Preview, an effective means of
listening to audio datasets before downloading them. We loaded an audio dataset with one line of Python
code and performed a series of generic pre-processing steps to prepare it for a machine learning model. In
total, this required just 13 lines of code, relying on simple Python functions to perform the necessary
operations. We introduced streaming mode, a method for loading and preparing samples of audio data on the fly. We
concluded by summarising the most popular speech recognition, speech translation and audio classification datasets on
the Hub.
Having read this blog, we hope you agree that 🤗 Datasets is the number one place for downloading and preparing audio
datasets. 🤗 Datasets is made possible through the work of the community. If you would like to contribute a dataset,
refer to the [Guide for Adding a New Dataset](https://huggingface.co/docs/datasets/share#share).
*Thank you to the following individuals who helped contribute to this blog post: Vaibhav Srivastav, Polina Kazakova, Patrick von Platen, Omar Sanseviero and Quentin Lhoest.*
| 4 |
0 | hf_public_repos | hf_public_repos/blog/graphcore-update.md | ---
title: "Graphcore and Hugging Face Launch New Lineup of IPU-Ready Transformers"
thumbnail: /blog/assets/77_graphcore-update/graphcore_update.png
authors:
- user: sallydoherty
guest: true
---
# Graphcore and Hugging Face Launch New Lineup of IPU-Ready Transformers
[Graphcore](https://huggingface.co/hardware/graphcore/) and Hugging Face have significantly expanded the range of Machine Learning modalities and tasks available in [Hugging Face Optimum](https://github.com/huggingface/optimum), an open-source library for Transformers performance optimization. Developers now have convenient access to a wide range of off-the-shelf Hugging Face Transformer models, optimised to deliver the best possible performance on Graphcore’s IPU.
Including the [BERT transformer model](https://www.graphcore.ai/posts/getting-started-with-hugging-face-transformers-for-ipus-with-optimum) made available shortly after [Optimum Graphcore launched](https://huggingface.co/blog/graphcore), developers can now access 10 models covering Natural Language Processing (NLP), Speech and Computer Vision, which come with IPU configuration files and ready-to-use pre-trained and fine-tuned model weights.
## New Optimum models
### Computer vision
[ViT](https://huggingface.co/Graphcore/vit-base-ipu) (Vision Transformer) is a breakthrough in image recognition that uses the transformer mechanism as its main component. When images are input to ViT, they're divided into small patches, similar to how words are processed in language systems. Each patch is encoded as an embedding and can then be processed individually by the Transformer.
### NLP
[GPT-2](https://huggingface.co/Graphcore/gpt2-medium-wikitext-103) (Generative Pre-trained Transformer 2) is a text generation transformer model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it is trained to generate texts from a prompt by guessing the next word in sentences.
[RoBERTa](https://huggingface.co/Graphcore/roberta-base-squad2) (Robustly optimized BERT approach) is a transformer model that (like GPT-2) is pretrained on a large corpus of English data in a self-supervised fashion. More precisely, RoBERTa was pretrained with the masked language modeling (MLM) objective. Taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. RoBERTa can be used for masked language modeling, but is mostly intended to be fine-tuned on a downstream task.
[DeBERTa](https://huggingface.co/Graphcore/deberta-base-ipu) (Decoding-enhanced BERT with disentangled attention) is a pretrained neural language model for NLP tasks. DeBERTa adapts the 2018 BERT and 2019 RoBERTa models using two novel techniques—a disentangled attention mechanism and an enhanced mask decoder—significantly improving the efficiency of model pretraining and performance of downstream tasks.
[BART](https://huggingface.co/Graphcore/bart-base-ipu) is a transformer encoder-decoder (seq2seq) model with a bidirectional (BERT-like) encoder and an autoregressive (GPT-like) decoder. BART is pre-trained by (1) corrupting text with an arbitrary noising function, and (2) learning a model to reconstruct the original text. BART is particularly effective when fine-tuned for text generation (e.g. summarization, translation) but also works well for comprehension tasks (e.g. text classification, question answering).
[LXMERT](https://huggingface.co/Graphcore/lxmert-gqa-uncased) (Learning Cross-Modality Encoder Representations from Transformers) is a multimodal transformer model for learning vision and language representations. It has three encoders: an object relationship encoder, a language encoder, and a cross-modality encoder. It is pretrained via a combination of masked language modeling, visual-language text alignment, ROI-feature regression, masked visual-attribute modeling, masked visual-object modeling, and visual-question answering objectives. It has achieved state-of-the-art results on the VQA and GQA visual-question-answering datasets.
[T5](https://huggingface.co/Graphcore/t5-small-ipu) (Text-to-Text Transfer Transformer) is a revolutionary new model that can take any text and convert it into a machine learning format for translation, question answering or classification. It introduces a unified framework that converts all text-based language problems into a text-to-text format for transfer learning. By doing so, it has simplified a way to use the same model, objective function, hyperparameters, and decoding procedure across a diverse set of NLP tasks.
### Speech
[HuBERT](https://huggingface.co/Graphcore/hubert-base-ipu) (Hidden-Unit BERT) is a self-supervised speech recognition model pretrained on audio, learning a combined acoustic and language model over continuous inputs. The HuBERT model either matches or improves upon the state-of-the-art wav2vec 2.0 performance on the Librispeech (960h) and Libri-light (60,000h) benchmarks with 10min, 1h, 10h, 100h, and 960h fine-tuning subsets.
[Wav2Vec2](https://huggingface.co/Graphcore/wav2vec2-base-ipu) is a pretrained self-supervised model for automatic speech recognition. Using a novel contrastive pretraining objective, Wav2Vec2 learns powerful speech representations from large amounts of unlabelled speech data, followed by fine-tuning on a small amount of transcribed speech data, outperforming the best semi-supervised methods while being conceptually simpler.
## Hugging Face Optimum Graphcore: building on a solid partnership
Graphcore joined the [Hugging Face Hardware Partner Program](https://huggingface.co/hardware) in 2021 as a founding member, with both companies sharing the common goal of lowering the barriers for innovators seeking to harness the power of machine intelligence.
Since then, Graphcore and Hugging Face have worked together extensively to make training of transformer models on IPUs fast and easy, with the first Optimum Graphcore model (BERT) being made available last year.
Transformers have proven to be extremely efficient for a wide range of functions, including feature extraction, text generation, sentiment analysis, translation and many more. Models like BERT are widely used by Graphcore customers in a huge array of applications including cybersecurity, voice call automation, drug discovery, and translation.
Optimizing their performance in the real world requires considerable time, effort and skills that are beyond the reach of many companies and organizations. In providing an open-source library of transformer models, Hugging Face has directly addressed these issues. Integrating IPUs with Hugging Face also allows developers to leverage not just the models, but also the datasets available on the Hugging Face Hub.
Developers can now use Graphcore systems to train 10 different types of state-of-the-art transformer models and access thousands of datasets with minimal coding complexity. With this partnership, we are providing users with the tools and ecosystem to easily download and fine-tune state-of-the-art pretrained models to various domains and downstream tasks.
## Bringing Graphcore’s latest hardware and software to the table
While members of Hugging Face’s ever-expanding user base have already been able to benefit from the speed, performance, and power- and cost-efficiency of IPU technology, a combination of recent hardware and software releases from Graphcore will unlock even more potential.
On the hardware front, the [Bow IPU](https://www.graphcore.ai/bow-processors) — announced in March and now shipping to customers — is the first processor in the world to use Wafer-on-Wafer (WoW) 3D stacking technology, taking the well-documented benefits of the IPU to the next level. Featuring ground-breaking advances in compute architecture and silicon implementation, communication and memory, each Bow IPU delivers up to 350 teraFLOPS of AI compute—an impressive 40% increase in performance—and up to 16% more power efficiency compared to the previous generation IPU. Importantly, Hugging Face Optimum users can switch seamlessly from previous generation IPUs to Bow processors, as no code changes are required.
Software also plays a vital role in unlocking the IPU’s capabilities, so naturally Optimum offers a plug-and-play experience with Graphcore’s easy-to-use Poplar SDK — which itself has received a major 2.5 update. Poplar makes it easy to train state-of-the-art models on state-of-the-art hardware, thanks to its full integration with standard machine learning frameworks, including PyTorch, PyTorch Lightning, and TensorFlow—as well as orchestration and deployment tools such as Docker and Kubernetes. Making Poplar compatible with these widely used, third-party systems allows developers to easily port their models from their other compute platforms and start taking advantage of the IPU’s advanced AI capabilities.
## Get started with Hugging Face’s Optimum Graphcore models
If you’re interested in combining the benefits of IPU technology with the strengths of transformer models, you can download the latest range of Optimum Graphcore models from the [Graphcore organization on the Hub](https://huggingface.co/Graphcore), or access the code from the [Optimum GitHub repo](https://github.com/huggingface/optimum-graphcore). Our [Getting Started blog post](https://huggingface.co/blog/graphcore-getting-started) will guide you through each step to start experimenting with IPUs.
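As a hedged illustration of what getting started might look like, the sketch below loads the IPU configuration published alongside one of the checkpoints listed above using the `optimum-graphcore` package. It assumes the package and a Poplar SDK environment are installed, and that the chosen repository ships an IPU configuration file; refer to the Getting Started post for a complete fine-tuning example.
```python
from optimum.graphcore import IPUConfig

# load the IPU execution configuration that ships with the checkpoint
ipu_config = IPUConfig.from_pretrained("Graphcore/vit-base-ipu")
print(ipu_config)

# the IPUConfig can then be passed to IPUTrainer together with a model and
# IPUTrainingArguments, mirroring the familiar transformers Trainer API
```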
Additionally, Graphcore has built an extensive page of [developer resources](https://www.graphcore.ai/developer), where you can find the IPU Model Garden—a repository of deployment-ready ML applications including computer vision, NLP, graph networks and more—alongside an array of documentation, tutorials, how-to-videos, webinars, and more. You can also access [Graphcore’s GitHub repo](https://github.com/graphcore) for more code references and tutorials.
To learn more about using Hugging Face on Graphcore, head over to our [partner page](https://huggingface.co/hardware/graphcore)!
| 5 |
0 | hf_public_repos | hf_public_repos/blog/chinese-ai-expansion.md | ---
title: "A Short Summary of Chinese AI Global Expansion"
thumbnail: /blog/assets/chinese-ai-expansion/thumbnail.png
authors:
- user: AdinaY
---
# A Short Summary of Chinese AI Global Expansion
In the early 15th century, Zheng He (also known as Chong Ho), a Chinese mariner and explorer during the early Ming Dynasty, led seven major naval expeditions, known as the "Voyages to the Western Oceans". His journey traced a path that went through Southeast Asia, the Middle East and then reached out to Africa. It was a bold move by China to establish diplomatic and trade relations with foreign lands, while exploring overseas opportunities. The word “出海” (Chu Hai, sailing abroad) has since held a special meaning about going global.
600 years later, China is once again making its mark internationally, evolving from a global manufacturing hub to a leader in ICT, electric vehicles, and AI technologies. By 2024, Chinese companies have accelerated their overseas expansion, particularly in AI. A June report from Feifan Research shows that out of 1,500 active AI companies worldwide, 751 are based in China, with 103 already expanding internationally. Many see this as a sign of China’s growing strength in tech innovation. Others argue that as domestic markets become saturated and competition intensifies, expanding overseas may have become the only viable option for these companies.
## Who is Expanding Overseas?
The first companies to seize the opportunity of going global are, not surprisingly, the leading Chinese tech giants. The likes of Huawei, Tencent, and Alibaba have chosen to focus on cloud computing and AI infrastructure when expanding overseas. In March 2024, **Tencent Cloud** partnered with Etihad Etisalat (Mobily), a leading telecom company in Saudi Arabia. Together, they launched the "Go Saudi" program, which aims to transform the digital landscape of the Kingdom of Saudi Arabia as part of its Vision 2030 strategy. In May, **Huawei** launched Galaxy AI as part of a larger initiative to boost digital intelligence transformation in North Africa, an initiative that is part of Huawei's broader **$430 million**, five-year investment plan aimed at accelerating smart transformation across the region. That same month, **Alibaba** announced the construction of data centers in Korea, Malaysia, the Philippines, Thailand, and Mexico, alongside the release of the international version of its large model service platform, “Model Studio”.
Notably, these tech giants have centered their overseas strategies on Southeast Asia and the Middle East, aligning with China’s [Belt and Road Initiative](https://eng.yidaiyilu.gov.cn/) and the [Digital Silk Road policy](https://www.orcasia.org/digital-silk-road). Amid rising geopolitical tensions, choosing regions where Chinese is commonly spoken, such as Southeast Asia, or emerging markets like the Middle East and long-time allies like Africa, seems a more strategic choice.
**ByteDance**, referred to as an "App factory", has chosen to focus on familiar Western business-to-consumer (to-C) markets, launching 11 overseas applications in just seven months. **CapCut**, launched in 2020, released its paid version **CapCut Pro** in 2022, then integrated AI features at the beginning of 2024, becoming one of the world’s most popular apps, with over **300 million** monthly active users. According to Sensor Tower, by July 2024, CapCut had generated **$125 million** in cumulative revenue from mobile applications.
Startups, despite being in the early stages of commercialization, are also eager to join the overseas expansion. The Chinese AI unicorn startups have a different strategy, based on a model + application approach. Facing high costs for training models, some have begun to shift focus from updating foundational models to more profitable application and scenario exploration. For startups reliant on funding, expanding overseas has become a necessity amid intense domestic competition. Many early-stage companies have chosen Western to-C markets, launching productivity, creative, and companion apps based on their respective models. For example, among the “Big Six” (the six most-watched Chinese AI startups: 01.AI, Baichuan AI, Zhipu, Moonshot AI, MiniMax, and StepFun; the list often changes, and some think DeepSeek AI and OpenBMB should be included), the apps “Talkie” from **MiniMax** and “PopAI” from **01.AI** have gained millions of users and achieved a level of profitability.
## Why Expand Overseas?
#### Domestic Market Competition
China has the world's largest number of internet users and a vast pool of technical developers, and no one wants to be left behind in the AI boom. As more companies flood the space, AI technology has developed rapidly, but the growth of applications and use cases has been slower. Both industry giants and startups face growth stagnation and profit pressure.
Between October 2023 and September 2024, China released **238 LLMs**. After more than a year of fierce competition, they entered a phase of consolidation. The pressure built up in May 2024 during the first price war, triggered by **DeepSeek**, an AI startup, which introduced architectural innovations that significantly reduced model inference costs. Following the announcement, major players like ByteDance, Tencent, Baidu, and Alibaba swiftly followed with price reductions, even cutting prices to below cost margins. This fierce competition stems from minimal technical differentiation between models and slower-than-expected productization.
From the launch of ChatGPT to July 2024, **78,612 AI companies** were either dissolved or suspended (source: TMTPOST). The competition is not only pushing players out of the ring; survivors are also drilling down into niches to differentiate themselves. For example, industry-specific LLMs are gaining traction, with a significant push from the government. The March 5, 2024 **“Government Work Report”** delivered by the Chinese Premier emphasized the **"AI+" strategy**, driving AI’s penetration across industries. By July 2024, the number of AI models registered with the Cyberspace Administration of China (CAC) exceeded 197, and nearly 70% were industry-specific LLMs, particularly in sectors like finance, healthcare, and education. The peace will not last long: AI's rapid integration into vertical industries is expected to become a key battleground in the next round of competition in the coming months.
Under these circumstances, going abroad seems to be a way out.
#### Pressures from Policy and Investment Environment
The government is not only incentivising, but also regulating. Between March and September 2024, it introduced a series of regulatory policies, particularly around data privacy, algorithm transparency, and content labeling.
- March 5, 2024: The China National Information Security Standardization Technical Committee (TC260) released a technical document outlining basic safety requirements for generative AI services.
- September 14, 2024: The Cyberspace Administration of China (CAC) proposed new rules requiring AI-generated content to be labeled, ensuring users can easily tell if content is human or machine-made.
Regulations are indispensable for any new industry; however, they also increase compliance costs for companies, especially SMEs. Former Microsoft engineer Shao Meng commented, "Tighter regulations, especially for to-C teams, may push more companies to expand overseas, including their products and even their teams."
On top of the policy pressure, the investment environment has become more and more rational over the last six months, compared to the AI fever that followed ChatGPT's release. By mid-2024, Chinese AI startups had raised approximately **$4.4 billion** across 372 funding rounds, a significant drop from the peak in 2021, when investments reached **$24.9 billion**.

#### Overseas Markets, a Promising Land for Chinese AI Companies
Compared to the domestic market, one distinctive element of certain overseas markets is that individual customers have a greater willingness to pay, thanks to a healthier business environment. By proposing groundbreaking AI solutions that meet local needs, Chinese AI companies can quickly develop stable revenue streams. For instance, in Southeast Asia, innovative approaches like AI-powered digital human livestreaming are breaking into the e-commerce live-streaming sector.
As for enterprise or government clients, emerging markets like Southeast Asia, the Middle East, and Africa have become the primary choices for Chinese AI companies, as mentioned above. These regions, still in the early stages of digital transformation, are jumping directly to the latest technologies. Compared to saturated Western markets, these areas have less competition, higher growth potential, and lower entry barriers, and Chinese AI tech giants are expanding their market share there by capitalizing on their **technological strengths**, **cost-efficient structures**, and **government support**.
## What are the key success factors?
#### Localization
Regulatory Localization: China has relatively strict AI governance policies; however, they focus more on content safety. When going abroad, Chinese AI companies must navigate diverse data privacy, security, and ethical regulations worldwide, which comes even before the implementation of their business model. The [EU’s AI Act and privacy protection laws](https://www.europarl.europa.eu/topics/en/article/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence) are a perfect example: Chinese companies must adjust their AI models to meet the **EU’s privacy-by-design principles**, where data protection is built into the core of AI products and services.
Technical Localization: Despite the magic of AI, there is still no one-size-fits-all solution. In emerging markets with weaker infrastructure, companies need to adjust their products to accommodate local network conditions, data storage, and algorithm adaptability. Meanwhile, in developed markets, they must comply with industry standards such as data localization requirements and ISO certifications.
#### Boosting International Influence
Despite the fast-growing AI innovation in China, Chinese AI companies have not yet gained enough awareness in overseas markets. Releasing open-source projects on the [Hugging Face Hub](https://huggingface.co/) has become an effective way to build global visibility. Beyond raising awareness, these models have also contributed valuable AI resources and diverse multilingual solutions to the global community. For example, at least one model from China appears on Hugging Face’s trending model leaderboard almost every one to two weeks. These include Alibaba’s [**Qwen**](https://huggingface.co/Qwen) series, a “long-running hit” on [Hugging Face’s Open LLM leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard), considered today to be among the best open LLMs in the world and supporting over 29 languages; [**DeepSeek coder**](https://huggingface.co/deepseek-ai), which is highly praised by the open-source community; and Zhipu AI’s open-sourced [**GLM**](https://huggingface.co/collections/THUDM/glm-4-665fcf188c414b03c2f7e3b7) and [**CogVideo**](https://huggingface.co/collections/THUDM/cogvideo-66c08e62f1685a3ade464cce) series.
Through open-source initiatives, these projects have gained considerable influence in the international open-source community, helping to enhance recognition, trust, and competitiveness for the Chinese projects in the global market.
An interesting point is that many Chinese companies, after expanding overseas, tend to adopt a new brand name or prefer to promote themselves under the name of their models or applications. “Chinese companies often create new brands for overseas products, even one per country, while Western companies prefer to use unified product names globally,” said Hugging Face engineer Tiezhen Wang. This approach helps them fit into local markets better and shields them from geopolitical pressure at the same time.
#### Promoting ESG Strategy
AI for Good is no doubt an important initiative to explore the potential of AI for a bigger purpose, an all-inclusive statement without borders. In Beijing, the China ESG30 Forum released the **"2024 China Enterprises Global Expansion Strategy Report,"** which highlighted ESG and AI as two pillars for Chinese companies to integrate into a new phase of globalization. Some tech giants have already begun adopting green energy to drive the sustainable development of their global data centers, or using AI image recognition technologies to monitor wildlife, among other initiatives. AI startups are also partnering with traditional industries to co-develop green technologies, such as renewable energy and electric vehicles. Such innovations further promote product sustainability, helping Chinese firms stand out in the competitive landscape.
## Conclusion
**Chinese AI companies are at a critical turning point**. Expanding overseas is not just a simple market expansion strategy but a necessary choice, driven both by a harsh domestic environment and by seemingly promising overseas opportunities. However, overseas expansion is not guaranteed to succeed. Facing unfamiliar markets and audiences, quickly adjusting to local demand, complying with regulations, and building awareness is no less challenging.
What’s more, AI is still in an early stage of development, and its true power is unleashed when AI companies find the sweet spot of being an AI enabler that reshapes industries. Going abroad is relevant today for Chinese AI companies to grow, but it will become even more relevant when they actually integrate with and bring value to local industries.
Zheng He’s expeditions to the “west ocean” were powered by a whole-nation strategy backed by strong economic power. History seems to be repeating itself today, but in a different context: technological innovation thrives not through centralized national efforts, but through the dynamic forces of the free market, where competition, entrepreneurship, and open exchange drive creativity and progress. China’s AI companies have come a long way to rise, and they still have a long way to go to flourish.
Thanks to Tiezhen Wang, Luke Cheng, Shao Meng and Sam Guo for providing valuable feedback.
Thank you for reading!
| 6 |
0 | hf_public_repos | hf_public_repos/blog/asr-chunking.md | ---
title: "Making automatic speech recognition work on large files with Wav2Vec2 in 🤗 Transformers"
thumbnail: /blog/assets/49_asr_chunking/thumbnail.png
authors:
- user: Narsil
---
# Making automatic speech recognition work on large files with Wav2Vec2 in 🤗 Transformers
```
Tl;dr: This post explains how to use the specificities of the Connectionist
Temporal Classification (CTC) architecture in order to achieve very good
quality automatic speech recognition (ASR) even on arbitrarily long files or
during live inference.
```
**Wav2Vec2** is a popular pre-trained model for speech recognition.
Released in [September 2020](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)
by Meta AI Research, the novel architecture catalyzed progress in
self-supervised pretraining for speech recognition, *e.g.* [*G. Ng et
al.*, 2021](https://arxiv.org/pdf/2104.03416.pdf), [*Chen et al*,
2021](https://arxiv.org/abs/2110.13900), [*Hsu et al.*,
2021](https://arxiv.org/abs/2106.07447) and [*Babu et al.*,
2021](https://arxiv.org/abs/2111.09296). On the Hugging Face Hub,
Wav2Vec2's most popular pre-trained checkpoint currently amounts to
over [**250,000** monthly
downloads](https://huggingface.co/facebook/wav2vec2-base-960h).
**Wav2Vec2** is at its core a **transformers** model, and one caveat
of **transformers** is that they can usually only handle a finite amount of sequence
length. Either because they use **position encodings** (not
the case here) or simply because the cost of attention in transformers
is actually O(n²) in sequence_length, meaning that using very large
sequence lengths explodes in complexity/memory. So with finite hardware
(even a very large GPU like an A100), you simply cannot run Wav2Vec2 on an
hour-long file. Your program will crash. Let's try it!
```bash
pip install transformers
```
```python
from transformers import pipeline
# This will work on any of the thousands of models at
# https://huggingface.co/models?pipeline_tag=automatic-speech-recognition
pipe = pipeline(model="facebook/wav2vec2-base-960h")
# The Public Domain LibriVox file used for the test
#!wget https://ia902600.us.archive.org/8/items/thecantervilleghostversion_2_1501_librivox/thecantervilleghostversion2_01_wilde_128kb.mp3 -O very_long_file.mp3
pipe("very_long_file.mp3")
# Crash out of memory !
pipe("very_long_file.mp3", chunk_length_s=10)
# This works and prints a very long string !
# This whole blogpost will explain how to make things work
```
Simple Chunking
---------------
The simplest way to achieve inference on very long files would be to simply chunk
the initial audio into shorter samples, let's say 10 seconds each, run inference on those, and
concatenate the results into a final reconstruction. This is computationally efficient but usually
leads to subpar results: to do good inference, the model needs some context, so around the
chunking borders, inference tends to be of poor quality.
Look at the following diagram:

There are ways to try and work around the problem in a general fashion, but
they are never entirely robust. You can try to chunk only when you encounter
silence, but you may have non-silent audio for a long time (a song, or noisy
café audio). You can also try to cut only when there's no voice, but that requires
another model and is not an entirely solved problem. You could also have
a continuous voice for a very long time.
As it turns out, CTC structure, which is used by Wav2Vec2, can be exploited
in order to achieve very robust speech recognition even on very long files
without falling into those pitfalls.
Chunking with stride
--------------------
Wav2Vec2 uses the [CTC algorithm](https://distill.pub/2017/ctc/), which means that every frame of audio is mapped
to a single letter prediction (logit).

That's the main feature we're going to use in order to add a `stride`.
This [link](https://www.quora.com/What-does-stride-mean-in-the-context-of-convolutional-neural-networks) explains it
in the image context, but it's the same concept for audio.
Because of this property, we can:
- Start doing inference on **overlapping** chunks
so that the model actually has proper context in the center.
- **Drop** the inferenced logits on the side.
- Chain the **logits** without their dropped sides to recover something extremely close to what the model would have
predicted on the full length audio.

This is not **technically** 100% the same thing as running the model on the whole
file, so it is not enabled by default, but as you saw in the earlier example, you
only need to add `chunk_length_s` to your `pipeline` for it to work.
In practice, we observed that most of the bad inference is confined to
the strides, which get dropped before the final reconstruction, leading to a proper
transcription of the full text.
Let's note that you can choose every argument of this technique:
```python
from transformers import pipeline
pipe = pipeline(model="facebook/wav2vec2-base-960h")
# stride_length_s is a tuple of the left and right stride length.
# With only 1 number, both sides get the same stride, by default
# the stride_length on one side is 1/6th of the chunk_length_s
output = pipe("very_long_file.mp3", chunk_length_s=10, stride_length_s=(4, 2))
```
Chunking with stride on LM augmented models
-------------------------------------------
In [transformers](https://github.com/huggingface/transformers), we also
added support for adding LM to Wav2Vec2 in order to boost the WER performance
of the models without even finetuning. [See this excellent blogpost explaining
how it works](https://huggingface.co/blog/wav2vec2-with-ngram).
It turns out that the LM works directly on the logits themselves, so we
can actually apply the exact same technique as before without any modification!
Chunking large files on these LM-boosted models therefore still works out of the box.
Live inference
--------------
A very nice perk of using a CTC model like Wav2Vec2 is that it is a single-pass
model, so it is **very** fast, especially on GPU. We can exploit that in order
to do live inference.
The principle is exactly the same as regular striding, but this time we can
feed the pipeline data **as it is coming in**, and simply use striding on
full chunks of, for instance, 10s with 1s of striding to get proper context.
That requires running many more inference steps than simple file chunking, but it can make the
live experience much better, because the model can print things as you are
speaking, without having to wait for X seconds before seeing something displayed.
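As an illustration only, a live transcription loop built on this striding could look like the sketch below. It assumes `ffmpeg` is installed, a microphone is available, and a recent enough version of `transformers` that ships the `ffmpeg_microphone_live` helper; the chunk values are illustrative.

```python
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_microphone_live

pipe = pipeline(model="facebook/wav2vec2-base-960h")
sampling_rate = pipe.feature_extractor.sampling_rate

mic = ffmpeg_microphone_live(
    sampling_rate=sampling_rate,
    chunk_length_s=10.0,   # full context given to the model
    stream_chunk_s=1.0,    # emit a (partial) prediction every second
)
for prediction in pipe(mic):
    print(prediction["text"], end="\r")
```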
| 7 |
0 | hf_public_repos | hf_public_repos/blog/personal-copilot.md | ---
title: "Personal Copilot: Train Your Own Coding Assistant"
thumbnail: /blog/assets/170_personal_copilot/thumbnail.png
authors:
- user: smangrul
- user: sayakpaul
---
# Personal Copilot: Train Your Own Coding Assistant
In the ever-evolving landscape of programming and software development, the quest for efficiency and productivity has led to remarkable innovations. One such innovation is the emergence of code generation models such as [Codex](https://openai.com/blog/openai-codex), [StarCoder](https://arxiv.org/abs/2305.06161) and [Code Llama](https://arxiv.org/abs/2308.12950). These models have demonstrated remarkable capabilities in generating human-like code snippets, thereby showing immense potential as coding assistants.
However, while these pre-trained models can perform impressively across a range of tasks, there's an exciting possibility lying just beyond the horizon: the ability to tailor a code generation model to your specific needs. Think of personalized coding assistants which could be leveraged at an enterprise scale.
In this blog post we show how we created HugCoder 🤗, a code LLM fine-tuned on the code contents from the public repositories of the [`huggingface` GitHub organization](https://github.com/huggingface). We will discuss our data collection workflow, our training experiments, and some interesting results. This will enable you to create your own personal copilot based on your proprietary codebase. We will leave you with a couple of further extensions of this project for experimentation.
Let’s begin 🚀

## Data Collection Workflow
Our desired dataset is conceptually simple, we structured it like so:
| Repository Name | Filepath in the Repository | File Contents |
|---|---|---|
Scraping code contents from GitHub is straightforward with the [Python GitHub API](https://github.com/PyGithub/PyGithub). However, depending on the number of repositories and the number of code files within a repository, one might easily run into API rate-limiting issues.
To prevent such problems, we decided to clone all the public repositories locally and extract the contents from them instead of through the API. We used the `multiprocessing` module from Python to download all repos in parallel, as shown in [this download script](https://github.com/sayakpaul/hf-codegen/blob/main/data/parallel_clone_repos.py).
A repository can often contain non-code files such as images, presentations and other assets. We’re not interested in scraping them. We created a [list of extensions](https://github.com/sayakpaul/hf-codegen/blob/f659eba76f07e622873211e5b975168b634e6c22/data/prepare_dataset.py#L17C1-L49C68) to filter them out. To parse code files other than Jupyter Notebooks, we simply used the "utf-8" encoding. For notebooks, we only considered the code cells.
We also excluded all file paths that were not directly related to code. These include: `.git`, `__pycache__`, and `xcodeproj`.
To keep the serialization of this content relatively memory-friendly, we used chunking and the [feather format](https://arrow.apache.org/docs/python/feather.html#:~:text=Feather%20is%20a%20portable%20file,Python%20(pandas)%20and%20R.). Refer to [this script](https://github.com/sayakpaul/hf-codegen/blob/main/data/prepare_dataset.py) for the full implementation.
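The sketch below illustrates this preparation step; the linked `prepare_dataset.py` is the actual implementation, and the extension list, directory names, and chunk size here are simplified assumptions.

```python
import os
import pandas as pd

EXCLUDE_DIRS = {".git", "__pycache__"}
CODE_EXTENSIONS = {".py", ".md", ".rs", ".js", ".ts", ".json", ".yaml", ".yml"}

def iter_files(repo_root):
    for repo in os.listdir(repo_root):
        for dirpath, dirnames, filenames in os.walk(os.path.join(repo_root, repo)):
            dirnames[:] = [d for d in dirnames if d not in EXCLUDE_DIRS]
            for name in filenames:
                if os.path.splitext(name)[1] not in CODE_EXTENSIONS:
                    continue
                path = os.path.join(dirpath, name)
                try:
                    with open(path, encoding="utf-8") as f:
                        yield repo, os.path.relpath(path, repo_root), f.read()
                except (UnicodeDecodeError, OSError):
                    continue  # skip binary or unreadable files

rows, chunk_id = [], 0
for repo, filepath, content in iter_files("cloned_repos"):
    rows.append({"repo_name": repo, "filepath": filepath, "content": content})
    if len(rows) >= 10_000:  # flush to disk regularly to keep memory bounded
        pd.DataFrame(rows).to_feather(f"chunk_{chunk_id}.feather")
        rows, chunk_id = [], chunk_id + 1
if rows:
    pd.DataFrame(rows).to_feather(f"chunk_{chunk_id}.feather")
```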
The final dataset is [available on the Hub](https://huggingface.co/datasets/sayakpaul/hf-codegen-v2), and it looks like this:

For this blog, we considered the top 10 Hugging Face public repositories, based on stargazers. They are the following:
> ['transformers', 'pytorch-image-models', 'datasets', 'diffusers', 'peft', 'tokenizers', 'accelerate', 'text-generation-inference', 'chat-ui', 'deep-rl-class']
[This is the code we used to generate this dataset](https://github.com/pacman100/DHS-LLM-Workshop/tree/main/personal_copilot/dataset_generation), and [this is the dataset in the Hub](https://huggingface.co/datasets/smangrul/hf-stack-v1). Here is a snapshot of what it looks like:

To reduce the project complexity, we didn’t consider deduplication of the dataset. If you are interested in applying deduplication techniques for a production application, [this blog post](https://huggingface.co/blog/dedup) is an excellent resource about the topic in the context of code LLMs.
## Finetuning your own Personal Co-Pilot
In this section, we show how to fine-tune the following models: [`bigcode/starcoder`](https://hf.co/bigcode/starcoder) (15.5B params), [`bigcode/starcoderbase-1b`](https://hf.co/bigcode/starcoderbase-1b) (1B params), [`Deci/DeciCoder-1b`](https://hf.co/Deci/DeciCoder-1b) (1B params). We'll use a single A100 40GB Colab Notebook using 🤗 PEFT (Parameter-Efficient Fine-Tuning) for all the experiments. Additionally, we'll show how to fully finetune the `bigcode/starcoder` (15.5B params) on a machine with 8 A100 80GB GPUs using 🤗 Accelerate's FSDP integration. The training objective is [fill in the middle (FIM)](https://arxiv.org/abs/2207.14255), wherein parts of a training sequence are moved to the end, and the reordered sequence is predicted auto-regressively.
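To make the objective concrete, below is a minimal sketch of the FIM transformation applied to raw text, assuming StarCoder-style special tokens (`<fim_prefix>`, `<fim_suffix>`, `<fim_middle>`); the training script applies an equivalent transformation at the token level, controlled by the `fim_rate` and `fim_spm_rate` arguments shown later.

```python
import random

def fim_transform(sample: str, fim_rate: float = 0.5) -> str:
    if random.random() > fim_rate:
        return sample  # leave the sample as plain left-to-right text
    # Split the sequence into prefix / middle / suffix at two random points.
    lo, hi = sorted(random.sample(range(len(sample)), 2))
    prefix, middle, suffix = sample[:lo], sample[lo:hi], sample[hi:]
    # "PSM" ordering: the model sees prefix and suffix, then predicts the middle.
    return f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>{middle}"

print(fim_transform("def add(a, b):\n    return a + b\n"))
```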
Why PEFT? Full fine-tuning is expensive. Let’s have some numbers to put things in perspective:
Minimum GPU memory required for full fine-tuning:
1. Weight: 2 bytes (Mixed-Precision training)
2. Weight gradient: 2 bytes
3. Optimizer state when using Adam: 4 bytes for original FP32 weight + 8 bytes for first and second moment estimates
4. Cost per parameter adding all of the above: 16 bytes per parameter
5. **15.5B model -> 248GB of GPU memory without even considering huge memory requirements for storing intermediate activations -> minimum 4X A100 80GB GPUs required**
Since the hardware requirements are huge, we'll be using parameter-efficient fine-tuning using [QLoRA](https://arxiv.org/abs/2305.14314). Here are the minimal GPU memory requirements for fine-tuning StarCoder using QLoRA:
> trainable params: 110,428,160 || all params: 15,627,884,544 || trainable%: 0.7066097761926236
1. Base model Weight: 0.5 bytes * 15.51B frozen params = 7.755 GB
2. Adapter weight: 2 bytes * 0.11B trainable params = 0.22GB
3. Weight gradient: 2 bytes * 0.11B trainable params = 0.22GB
4. Optimizer state when using Adam: 4 bytes * 0.11B trainable params * 3 = 1.32GB
5. **Adding all of the above -> 9.51 GB ~10GB -> 1 A100 40GB GPU required** 🤯. The reason for the A100 40GB GPU is that the intermediate activations for long sequence lengths of 2048 and a batch size of 4 for training lead to higher memory requirements. As we will see below, the GPU memory required is 26GB, which can be accommodated on an A100 40GB GPU. Also, A100 GPUs have better compatibility with Flash Attention 2.
In the above calculations, we didn't consider memory required for intermediate activation checkpointing which is considerably huge. We leverage Flash Attention V2 and Gradient Checkpointing to overcome this issue.
1. For QLoRA along with flash attention V2 and gradient checkpointing, the total memory occupied by the model on a single A100 40GB GPU is **26 GB** with a **batch size of 4**.
2. For full fine-tuning using FSDP along with Flash Attention V2 and Gradient Checkpointing, the memory occupied per GPU ranges between **70 GB to 77.6 GB** with a **per_gpu_batch_size of 1**.
Please refer to the [model-memory-usage](https://huggingface.co/spaces/hf-accelerate/model-memory-usage) to easily calculate how much vRAM is needed to train and perform big model inference on a model hosted on the 🤗 Hugging Face Hub.
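As a quick sanity check of the arithmetic above, the same numbers can be reproduced in a couple of lines (a rough estimate in GB, taking 1 GB = 1e9 bytes and ignoring activations, as noted):

```python
total_params, trainable_params = 15.51e9, 0.11e9

full_ft = total_params * (2 + 2 + 12) / 1e9            # fp16 weights + grads + Adam states
qlora = (total_params * 0.5                             # 4-bit frozen base weights
         + trainable_params * (2 + 2 + 12)) / 1e9       # fp16 adapters + grads + Adam states
print(f"full fine-tuning ≈ {full_ft:.0f} GB, QLoRA ≈ {qlora:.1f} GB")
# full fine-tuning ≈ 248 GB, QLoRA ≈ 9.5 GB
```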
## Full Finetuning
We will look at how to do full fine-tuning of `bigcode/starcoder` (15B params) on 8 A100 80GB GPUs using PyTorch Fully Sharded Data Parallel (FSDP) technique. For more information on FSDP, please refer to [Fine-tuning Llama 2 70B using PyTorch FSDP](https://huggingface.co/blog/ram-efficient-pytorch-fsdp) and [Accelerate Large Model Training using PyTorch Fully Sharded Data Parallel](https://huggingface.co/blog/pytorch-fsdp).
**Resources**
1. Codebase: [link](https://github.com/pacman100/DHS-LLM-Workshop/tree/main/personal_copilot/training). It uses the recently added Flash Attention V2 support in Transformers.
2. FSDP Config: [fsdp_config.yaml](https://github.com/pacman100/DHS-LLM-Workshop/blob/main/personal_copilot/training/configs/fsdp_config.yaml)
3. Model: [bigcode/starcoder](https://huggingface.co/bigcode/starcoder)
4. Dataset: [smangrul/hf-stack-v1](https://huggingface.co/datasets/smangrul/hf-stack-v1)
5. Fine-tuned Model: [smangrul/peft-lora-starcoder15B-v2-personal-copilot-A100-40GB-colab](https://huggingface.co/smangrul/peft-lora-starcoder15B-v2-personal-copilot-A100-40GB-colab)
The command to launch training is given at [run_fsdp.sh](https://github.com/pacman100/DHS-LLM-Workshop/blob/main/personal_copilot/training/run_fsdp.sh).
```
accelerate launch --config_file "configs/fsdp_config.yaml" train.py \
--model_path "bigcode/starcoder" \
--dataset_name "smangrul/hf-stack-v1" \
--subset "data" \
--data_column "content" \
--split "train" \
--seq_length 2048 \
--max_steps 2000 \
--batch_size 1 \
--gradient_accumulation_steps 2 \
--learning_rate 5e-5 \
--lr_scheduler_type "cosine" \
--weight_decay 0.01 \
--num_warmup_steps 30 \
--eval_freq 100 \
--save_freq 500 \
--log_freq 25 \
--num_workers 4 \
--bf16 \
--no_fp16 \
--output_dir "starcoder-personal-copilot-A100-40GB-colab" \
--fim_rate 0.5 \
--fim_spm_rate 0.5 \
--use_flash_attn
```
The total training time was **9 Hours**. Taking the cost of $12.00 / hr based on [lambdalabs](https://lambdalabs.com/service/gpu-cloud/pricing) for 8x A100 80GB GPUs, the total cost would be **$108**.
## PEFT
We will look at how to use QLoRA for fine-tuning `bigcode/starcoder` (15B params) on a single A100 40GB GPU using 🤗 PEFT. For more information on QLoRA and PEFT methods, please refer to [Making LLMs even more accessible with bitsandbytes, 4-bit quantization and QLoRA](https://huggingface.co/blog/4bit-transformers-bitsandbytes) and [🤗 PEFT: Parameter-Efficient Fine-Tuning of Billion-Scale Models on Low-Resource Hardware](https://huggingface.co/blog/peft).
**Resources**
1. Codebase: [link](https://github.com/pacman100/DHS-LLM-Workshop/tree/main/personal_copilot/training). It uses the recently added Flash Attention V2 support in Transformers.
2. Colab notebook: [link](https://colab.research.google.com/drive/1Tz9KKgacppA4S6H4eo_sw43qEaC9lFLs?usp=sharing). Make sure to choose A100 GPU with High RAM setting.
3. Model: [bigcode/starcoder](https://huggingface.co/bigcode/starcoder)
4. Dataset: [smangrul/hf-stack-v1](https://huggingface.co/datasets/smangrul/hf-stack-v1)
5. QLoRA Fine-tuned Model: [smangrul/peft-lora-starcoder15B-v2-personal-copilot-A100-40GB-colab](https://huggingface.co/smangrul/peft-lora-starcoder15B-v2-personal-copilot-A100-40GB-colab)
The command to launch training is given at [run_peft.sh](https://github.com/pacman100/DHS-LLM-Workshop/blob/main/personal_copilot/training/run_peft.sh). The total training time was **12.5 Hours**. Taking the cost of **$1.10 / hr** based on [lambdalabs](https://lambdalabs.com/service/gpu-cloud/pricing), the total cost would be **$13.75**. That's pretty good 🚀! In terms of cost, it's **7.8X** lower than the cost for full fine-tuning.
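For reference, the QLoRA setup inside the training script boils down to something like the sketch below; the exact hyperparameters and target modules are defined in `train.py` / `run_peft.sh`, so treat the values here as illustrative assumptions.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained("bigcode/starcoder", quantization_config=bnb_config)
model = prepare_model_for_kbit_training(model)

lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["c_proj", "c_attn", "q_attn"],  # attention/projection layers of GPTBigCode
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # should report well under 1% trainable parameters
```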
## Comparison
The plot below shows the eval loss, train loss and learning rate scheduler for QLoRA vs full fine-tuning. We observe that full fine-tuning leads to slightly lower loss and converges a bit faster compared to QLoRA. The learning rate for peft fine-tuning is 10X more than that of full fine-tuning.

To make sure that our QLoRA model doesn't lead to catastrophic forgetting, we run the Python Human Eval on it. Below are the results we got. `Pass@1` measures the pass rate of completions considering just a single generated code candidate per problem. We can observe that the performance on `humaneval-python` is comparable between the base `bigcode/starcoder` (15B params) and the fine-tuned PEFT model `smangrul/peft-lora-starcoder15B-v2-personal-copilot-A100-40GB-colab`.
| Model | Pass@1 |
|---|---|
| bigcode/starcoder | 33.57 |
| smangrul/peft-lora-starcoder15B-v2-personal-copilot-A100-40GB-colab | 33.37 |
Let's now look at some qualitative samples. In our manual analysis, we noticed that the QLoRA model led to slight overfitting, and as such we down-weigh it by creating a new weighted adapter with weight 0.8 via the `add_weighted_adapter` utility of PEFT.
We will look at 2 code infilling examples wherein the task of the model is to fill the part denoted by the `<FILL_ME>` placeholder. We will consider infilling completions from GitHub Copilot, the QLoRA fine-tuned model and the full fine-tuned model.

*Qualitative Example 1*
In the example above, the completion from GitHub Copilot is along the correct lines but doesn't help much. On the other hand, completions from QLoRA and full fine-tuned models are correctly infilling the entire function call with the necessary parameters. However, they are also adding a lot more noise afterwards. This could be controlled with a post-processing step to limit completions to closing brackets or new lines. Note that both QLoRA and full fine-tuned models produce results with similar quality.

*Qualitative Example 2*
In the second example above, **GitHub Copilot didn't give any completion**. This can be due to the fact that 🤗 PEFT is a recent library and not yet part of Copilot's training data, which **is exactly the type of problem we are trying to address**. On the other hand, completions from QLoRA and full fine-tuned models are correctly infilling the entire function call with the necessary parameters. Again, note that both the QLoRA and the full fine-tuned models are giving generations of similar quality. Inference Code with various examples for full fine-tuned model and peft model are available at [Full_Finetuned_StarCoder_Inference.ipynb](https://github.com/pacman100/DHS-LLM-Workshop/blob/main/personal_copilot/inference/Full_Finetuned_StarCoder_Inference.ipynb) and [PEFT_StarCoder_Inference.ipynb](https://github.com/pacman100/DHS-LLM-Workshop/blob/main/personal_copilot/inference/PEFT_StarCoder_Inference.ipynb), respectively.
Therefore, we can observe that the generations from both the variants are as per expectations. Awesome! 🚀
## How do I use it in VS Code?
You can easily configure a custom code-completion LLM in VS Code using 🤗 [llm-vscode](https://marketplace.visualstudio.com/items?itemName=HuggingFace.huggingface-vscode) VS Code Extension, together with hosting the model via [🤗 Inference EndPoints](https://ui.endpoints.huggingface.co/). We'll go through the required steps below. You can learn more details about deploying an endpoint in the [inference endpoints documentation](https://huggingface.co/docs/inference-endpoints/index).
### Setting an Inference Endpoint
Below are the screenshots with the steps we followed to create our custom Inference Endpoint. We used our QLoRA model, exported as a full-sized _merged_ model that can be easily loaded in `transformers`.


### Setting up the VS Code Extension
Just follow the [installation steps](https://github.com/huggingface/llm-vscode#installation). In the settings, replace the endpoint in the field below, so it points to the HF Inference Endpoint you deployed.

Usage will look like below:

## Finetuning your own Code Chat Assistant
So far, the models we trained were specifically trained as personal co-pilot for code completion tasks. They aren't trained to carry out conversations or for question answering. `Octocoder` and `StarChat` are great examples of such models. This section briefly describes how to achieve that.
**Resources**
1. Codebase: [link](https://github.com/pacman100/DHS-LLM-Workshop/tree/main/code_assistant/training). It uses the recently added Flash Attention V2 support in Transformers.
2. Colab notebook: [link](https://colab.research.google.com/drive/1XFyePK-3IoyX81RM94JO73CcIZtAU4i4?usp=sharing). Make sure to choose A100 GPU with High RAM setting.
3. Model: [bigcode/starcoderplus](https://huggingface.co/bigcode/starcoderplus)
4. Dataset: [smangrul/code-chat-assistant-v1](https://huggingface.co/datasets/smangrul/code-chat-assistant-v1). Mix of `LIMA+GUANACO` with proper formatting in a ready-to-train format.
5. Trained Model: [smangrul/peft-lora-starcoderplus-chat-asst-A100-40GB-colab](https://huggingface.co/smangrul/peft-lora-starcoderplus-chat-asst-A100-40GB-colab)
## Dance of LoRAs
If you have dabbled with Stable Diffusion models and LoRAs for making your own Dreambooth models, you might be familiar with the concepts of combining different LoRAs with different weights, using a LoRA model with a different base model than the one on which it was trained. In text/code domain, this remains unexplored territory. We carry out experiments in this regard and have observed very promising findings. Are you ready? Let's go! 🚀
### Mix-and-Match LoRAs
PEFT currently supports 3 ways of combining LoRA models, `linear`, `svd` and `cat`. For more details, refer to [tuners#peft.LoraModel.add_weighted_adapter](https://huggingface.co/docs/peft/main/en/package_reference/tuners#peft.LoraModel.add_weighted_adapter).
Our notebook [Dance_of_LoRAs.ipynb](https://github.com/pacman100/DHS-LLM-Workshop/blob/main/personal_copilot/inference/Dance_of_LoRAs.ipynb) includes all the inference code and various LoRA loading combinations, like loading the chat assistant on top of `starcoder` instead of `starcoderplus`, which is the base model that we fine-tuned.
Here, we will consider 2 abilities (`chatting/QA` and `code-completion`) on 2 data distributions (`top 10 public hf codebase` and `generic codebase`). That gives us 4 axes on which we'll carry out some qualitative evaluation analyses.
##### First, let us consider the `chatting/QA` task.
If we disable adapters, we observe that the task fails for both datasets, as the base model (`starcoder`) is only meant for code completion and not suitable for `chatting/question-answering`. Enabling `copilot` adapter performs similar to the disabled case because this LoRA was also specifically fine-tuned for code-completion.
Now, let's enable the `assistant` adapter.

Question Answering based on generic code

Question Answering based on HF code
We can observe that generic question regarding `scrapy` is being answered properly. However, it is failing for the HF code related question which wasn't part of its pretraining data.
##### Let us now consider the `code-completion` task.
On disabling adapters, we observe that the code completion for the generic two-sum works as expected. However, the HF code completion fails with wrong params to `LoraConfig`, because the base model hasn't seen it in its pretraining data. Enabling `assistant` performs similar to the disabled case as it was trained on natural language conversations which didn't have any Hugging Face code repos.
Now, let's enable the `copilot` adapter.

We can observe that the `copilot` adapter gets it right in both cases. Therefore, it performs as expected for code-completions when working with HF specific codebase as well as generic codebases.
**Now, as a user, I want to combine the ability of `assistant` as well as `copilot`. This will enable me to use it for code completion while coding in an IDE, and also have it as a chatbot to answer my questions regarding APIs, classes, methods, documentation. It should be able to provide answers to questions like `How do I use x`, `Please write a code snippet for Y` on my codebase.**
PEFT allows you to do it via `add_weighted_adapter`. Let's create a new adapter `code_buddy` with equal weights to `assistant` and `copilot` adapters.

Combining Multiple Adapters
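In code, this boils down to something like the sketch below, assuming both LoRAs have already been loaded on the PEFT model (e.g. via `load_adapter`) under the names `assistant` and `copilot`; see the linked notebook for the exact calls.

```python
model.add_weighted_adapter(
    adapters=["assistant", "copilot"],
    weights=[1.0, 1.0],
    adapter_name="code_buddy",
    combination_type="linear",  # "svd" and "cat" are the other supported options
)
model.set_adapter("code_buddy")  # make the combined adapter the active one
```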
Now, let's see how `code_buddy` performs on the `chatting/question_answering` tasks.

We can observe that `code_buddy` is performing much better than the `assistant` or `copilot` adapters alone! It is able to answer the _write a code snippet_ request to show how to use a specific HF repo API. However, it is also hallucinating the wrong links/explanations, which remains an open challenge for LLMs.
Below is the performance of `code_buddy` on code completion tasks.

We can observe that `code_buddy` is performing on par with `copilot`, which was specifically finetuned for this task.
### Transfer LoRAs to different base models
We can also transfer the LoRA models to different base models.
We will take the hot-off-the-press `Octocoder` model and apply on it the LoRA we trained above with `starcoder` base model. Please go through the following notebook [PEFT_Personal_Code_CoPilot_Adapter_Transfer_Octocoder.ipynb](https://github.com/pacman100/DHS-LLM-Workshop/blob/main/personal_copilot/inference/PEFT_Personal_Code_CoPilot_Adapter_Transfer_Octocoder.ipynb) for the entire code.
**Performance on the Code Completion task**

We can observe that `octocoder` is performing great. It is able to complete HF specific code snippets. It is also able to complete generic code snippets as seen in the notebook.
**Performance on the Chatting/QA task**
As Octocoder is trained to answer questions and carry out conversations about coding, let's see if it can use our LoRA adapter to answer HF specific questions.

Yay! It correctly answers in detail how to create `LoraConfig` and related peft model along with correctly using the model name, dataset name as well as param values of LoraConfig. On disabling the adapter, it fails to correctly use the API of `LoraConfig` or to create a PEFT model, suggesting that it isn't part of the training data of Octocoder.
## How do I run it locally?
I know, after all this, you want to finetune starcoder on your codebase and use it locally on your consumer hardware, such as Mac laptops with M1 GPUs or Windows machines with RTX 4090/3090 GPUs ...
Don't worry, we have got you covered.
We will be using this super cool open source library [mlc-llm](https://github.com/mlc-ai/mlc-llm) 🔥. Specifically, we will be using this fork [pacman100/mlc-llm](https://github.com/pacman100/mlc-llm), which has changes to get it working with the Hugging Face Code Completion extension for VS Code. On my Mac laptop with an M1 Metal GPU, the 15B model was painfully slow. Hence, we will go small and train a PEFT LoRA version as well as a full finetuned version of `bigcode/starcoderbase-1b`. The training colab notebooks are linked below:
1. Colab notebook for Full fine-tuning and PEFT LoRA finetuning of `starcoderbase-1b`: [link](https://colab.research.google.com/drive/1tTdvc2buL3Iy1PKwrG_bBIDP06DC9r5m?usp=sharing)
The training loss, evaluation loss as well as learning rate schedules are plotted below:

Now, we will look at detailed steps for locally hosting the merged model [smangrul/starcoder1B-v2-personal-copilot-merged](https://huggingface.co/smangrul/starcoder1B-v2-personal-copilot-merged) and using it with 🤗 [llm-vscode](https://marketplace.visualstudio.com/items?itemName=HuggingFace.huggingface-vscode) VS Code Extension.
1. Clone the repo
```
git clone --recursive https://github.com/pacman100/mlc-llm.git && cd mlc-llm/
```
2. Install the mlc-ai and mlc-chat (in editable mode) :
```
pip install --pre --force-reinstall mlc-ai-nightly mlc-chat-nightly -f https://mlc.ai/wheels
cd python
pip uninstall mlc-chat-nightly
pip install -e "."
```
3. Compile the model via:
```
time python3 -m mlc_llm.build --hf-path smangrul/starcoder1B-v2-personal-copilot-merged --target metal --use-cache=0
```
4. Update the config with the following values in `dist/starcoder1B-v2-personal-copilot-merged-q4f16_1/params/mlc-chat-config.json`:
```diff
{
"model_lib": "starcoder7B-personal-copilot-merged-q4f16_1",
"local_id": "starcoder7B-personal-copilot-merged-q4f16_1",
"conv_template": "code_gpt",
- "temperature": 0.7,
+ "temperature": 0.2,
- "repetition_penalty": 1.0,
"top_p": 0.95,
- "mean_gen_len": 128,
+ "mean_gen_len": 64,
- "max_gen_len": 512,
+ "max_gen_len": 64,
"shift_fill_factor": 0.3,
"tokenizer_files": [
"tokenizer.json",
"merges.txt",
"vocab.json"
],
"model_category": "gpt_bigcode",
"model_name": "starcoder1B-v2-personal-copilot-merged"
}
```
5. Run the local server:
```
python -m mlc_chat.rest --model dist/starcoder1B-v2-personal-copilot-merged-q4f16_1/params --lib-path dist/starcoder1B-v2-personal-copilot-merged-q4f16_1/starcoder1B-v2-personal-copilot-merged-q4f16_1-metal.so
```
6. Change the endpoint of HF Code Completion extension in VS Code to point to the local server:

7. Open a new file in VS code, paste the code below and have the cursor in-between the doc quotes, so that the model tries to infill the doc string:

Voila! ⭐️
The demo at the start of this post is this 1B model running locally on my Mac laptop.
### Conclusion
In this blog post, we saw how to finetune `starcoder` to create a personal co-pilot that knows about our code. We called it 🤗 HugCoder, as we trained it on Hugging Face code :) After looking at the data collection workflow, we compared training using QLoRA vs full fine-tuning. We also experimented by combining different LoRAs, which is still an unexplored technique in the text/code domain. For deployment, we examined remote inference using 🤗 Inference Endpoints, and also showed on-device execution of a smaller model with VS Code and MLC.
Please, let us know if you use these methods for your own codebase!
### Acknowledgements
We would like to thank [Pedro Cuenca](https://github.com/pcuenca), [Leandro von Werra](https://github.com/lvwerra), [Benjamin Bossan](https://github.com/BenjaminBossan), [Sylvain Gugger](https://github.com/sgugger) and [Loubna Ben Allal](https://github.com/loubnabnl) for their help with the writing of this blogpost.
| 8 |
0 | hf_public_repos | hf_public_repos/blog/leaderboard-decodingtrust.md | ---
title: "An Introduction to AI Secure LLM Safety Leaderboard"
thumbnail: /blog/assets/leaderboards-on-the-hub/thumbnail_decodingtrust.png
authors:
- user: danielz01
guest: true
- user: alphapav
guest: true
- user: Cometkmt
guest: true
- user: chejian
guest: true
- user: BoLi-aisecure
guest: true
---
# An Introduction to AI Secure LLM Safety Leaderboard
Given the widespread adoption of LLMs, it is critical to understand their safety and risks in different scenarios before extensive deployments in the real world. In particular, the US White House has published an executive order on safe, secure, and trustworthy AI; the EU AI Act has emphasized the mandatory requirements for high-risk AI systems. Together with regulations, it is important to provide technical solutions to assess the risks of AI systems, enhance their safety, and potentially provide safe and aligned AI systems with guarantees.
Thus, in 2023, at [Secure Learning Lab](https://boli.cs.illinois.edu/), we introduced [DecodingTrust](https://decodingtrust.github.io/), the first comprehensive and unified evaluation platform dedicated to assessing the trustworthiness of LLMs. (*This work won the [Outstanding Paper Award](https://blog.neurips.cc/2023/12/11/announcing-the-neurips-2023-paper-awards/) at NeurIPS 2023.*)
DecodingTrust provides a multifaceted evaluation framework covering eight trustworthiness perspectives: toxicity, stereotype bias, adversarial robustness, OOD robustness, robustness on adversarial demonstrations, privacy, machine ethics, and fairness. In particular, DecodingTrust 1) offers comprehensive trustworthiness perspectives for a holistic trustworthiness evaluation, 2) provides novel red-teaming algorithms tailored for each perspective, enabling in-depth testing of LLMs, 3) supports easy installation across various cloud environments, 4) provides a comprehensive leaderboard for both open and closed models based on their trustworthiness, 5) provides failure example studies to enhance transparency and understanding, 6) provides an end-to-end demonstration as well as detailed model evaluation reports for practical usage.
Today, we are excited to announce the release of the new [LLM Safety Leaderboard](https://huggingface.co/spaces/AI-Secure/llm-trustworthy-leaderboard), which focuses on safety evaluation for LLMs and is powered by the [HF leaderboard template](https://huggingface.co/demo-leaderboard-backend).
## Red-teaming Evaluation
<script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.45.1/gradio.js"> </script>
<gradio-app theme_mode="light" space="AI-Secure/llm-trustworthy-leaderboard"></gradio-app>
DecodingTrust provides several novel red-teaming methodologies for each evaluation perspective to perform stress tests. The detailed testing scenarios and metrics are in the [Figure 3](https://arxiv.org/html/2306.11698v4/extracted/5331426/figures/taxonomy.main.png) of our paper.
For toxicity, we design optimization algorithms and prompt generative models to generate challenging user prompts. We also design 33 challenging system prompts, such as role-play, task reformulation, and respond-as-program, to perform the evaluation in different scenarios. We then leverage the Perspective API to evaluate the toxicity score of the content generated given our challenging prompts.
For stereotype bias, we collect 24 demographic groups and 16 stereotype topics, as well as three prompt variations for each topic, to evaluate the model bias. We prompt the model 5 times and take the average as the model bias score.
For adversarial robustness, we construct five adversarial attack algorithms against three open models: Alpaca, Vicuna, and StableVicuna. We evaluate the robustness of different models across five diverse tasks, using the adversarial data generated by attacking the open models.
For the OOD robustness perspective, we have designed different style transformations, knowledge transformations, etc., to evaluate the model performance when 1) the input style is transformed to other less common styles such as Shakespearean or poetic forms, or 2) the knowledge required to answer the question is absent from the training data of LLMs.
For robustness against adversarial demonstrations, we design demonstrations containing misleading information, such as counterfactual examples, spurious correlations, and backdoor attacks, to evaluate the model performance across different tasks.
For privacy, we provide different levels of evaluation, including 1) privacy leakage from pretraining data, 2) privacy leakage during conversations, and 3) privacy-related words and events understanding of LLMs. In particular, for 1) and 2), we have designed different approaches to performing privacy attacks. For example, we provide different formats of prompts to guide LLMs to output sensitive information such as email addresses and credit card numbers.
For ethics, we leverage the ETHICS and Jiminy Cricket datasets to design jailbreaking system and user prompts, which we use to evaluate the model performance on immoral behavior recognition.
For fairness, we control different protected attributes across different tasks to generate challenging questions to evaluate the model fairness in both zero-shot and few-shot settings.
## Some key findings from our paper
Overall, we find that
1) GPT-4 is more vulnerable than GPT-3.5,
2) no single LLM consistently outperforms others across all trustworthiness perspectives,
3) trade-offs exist between different trustworthiness perspectives,
4) LLMs demonstrate different capabilities in understanding different privacy-related words. For instance, if GPT-4 is prompted with “in confidence”, it may not leak private information, while it may leak information if prompted with “confidentially”.
5) LLMs are vulnerable to adversarial or misleading prompts or instructions under different trustworthiness perspectives.
## How to submit your model for evaluation
First, convert your model weights to safetensors.
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to display the number of parameters of your model in the main table!
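One simple way to do the conversion (a sketch) is to load your checkpoint with transformers and save it back with safetensors serialization enabled, then upload the resulting folder to the Hub:

```Python
from transformers import AutoModel

model = AutoModel.from_pretrained("your model name")
model.save_pretrained("your-model-safetensors", safe_serialization=True)
# then push the folder, e.g. with `model.push_to_hub(...)` or the Hub UI
```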
Then, make sure you can load your model and tokenizer using AutoClasses:
```Python
from transformers import AutoConfig, AutoModel, AutoTokenizer
config = AutoConfig.from_pretrained("your model name")
model = AutoModel.from_pretrained("your model name")
tokenizer = AutoTokenizer.from_pretrained("your model name")
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
Notes:
- Make sure your model is public!
- We don't yet support models that require `trust_remote_code=True`. But we are working on it, stay posted!
Finally, use the ["Submit here!" panel in our leaderboard](https://huggingface.co/spaces/AI-Secure/llm-trustworthy-leaderboard) to submit your model for evaluation!
## Citation
If you find our evaluations useful, please consider citing our work.
```
@article{wang2023decodingtrust,
title={DecodingTrust: A Comprehensive Assessment of Trustworthiness in GPT Models},
author={Wang, Boxin and Chen, Weixin and Pei, Hengzhi and Xie, Chulin and Kang, Mintong and Zhang, Chenhui and Xu, Chejian and Xiong, Zidi and Dutta, Ritik and Schaeffer, Rylan and others},
booktitle={Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
year={2023}
}
```
| 9 |
0 | hf_public_repos/block_movement_pruning/block_movement_pruning/emmental | hf_public_repos/block_movement_pruning/block_movement_pruning/emmental/modules/masked_nn.py | # coding=utf-8
# Copyright 2020-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Masked Linear module: A fully connected layer that computes an adaptive binary mask on the fly.
The mask (binary or not) is computed at each forward pass and multiplied against
the weight matrix to prune a portion of the weights.
The pruned weight matrix is then multiplied against the inputs (and if necessary, the bias is added).
"""
import itertools
import math
import random
from itertools import permutations
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn import init
from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
sparse_patterns = None
AMPERE_M = 4
AMPERE_N = 2
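# The two autograd helpers below apply a fixed permutation to the columns (dim 1) or rows
# (dim 0) of a tensor in the forward pass, and route gradients back through the inverse
# permutation in the backward pass.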
class StaticIndexDim1(torch.autograd.Function):
@staticmethod
def forward(ctx, input, index, reverse_index):
ctx.save_for_backward(reverse_index)
return input[:, index]
@staticmethod
def backward(ctx, grad_output):
reverse_index, = ctx.saved_tensors
return grad_output[:, reverse_index], None, None
class Index0(torch.autograd.Function):
@staticmethod
def forward(ctx, input, index, reverse_index):
ctx.save_for_backward(reverse_index)
return input[index, :]
@staticmethod
def backward(ctx, grad_output):
reverse_index, = ctx.saved_tensors
return grad_output[reverse_index, :], None, None
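# Build (and cache globally) the set of all binary patterns of length AMPERE_M containing
# exactly AMPERE_N ones, i.e. the candidate Ampere-style N:M structured-sparsity patterns.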
def ampere_pattern(device=None):
global sparse_patterns, AMPERE_N, AMPERE_M
if sparse_patterns is not None:
if device is not None:
if sparse_patterns.device != device:
sparse_patterns = sparse_patterns.to(device=device)
return sparse_patterns
patterns = torch.zeros(AMPERE_M)
patterns[:AMPERE_N] = 1
sparse_patterns = torch.Tensor(list(set(permutations(patterns.tolist()))))
return sparse_patterns
class DimensionShuffler(nn.Module):
def __init__(self, in_features, out_features, in_features_group = 4, out_features_group = 4):
super().__init__()
self.in_features = in_features
self.in_features_group = in_features_group
self.out_features = out_features
self.out_features_group = out_features_group
in_mapping = self.dimension_mapping(in_features)
out_mapping = self.dimension_mapping(out_features)
out_mapping_reverse = out_mapping.sort()[1]
self.register_buffer("in_mapping", in_mapping)
self.register_buffer("out_mapping", out_mapping)
self.register_buffer("out_mapping_reverse", out_mapping_reverse)
#in_permutations = self.all_permutations(in_features_group)[2]
#self.register_buffer("in_permutations", in_permutations)
#out_permutations = self.all_permutations(out_features_group)[2]
#self.register_buffer("out_permutations", out_permutations)
in_permutation_scores = torch.randn(in_features // in_features_group, in_features_group - 1)
out_permutation_scores = torch.randn(out_features // out_features_group, out_features_group - 1)
# self.register_buffer("in_permutation_scores", in_permutation_scores)
# self.register_buffer("out_permutation_scores", out_permutation_scores)
self.in_permutation_scores = nn.Parameter(in_permutation_scores)
self.out_permutation_scores = nn.Parameter(out_permutation_scores)
@staticmethod
def rotate_matrices(angles):
assert(angles.shape[-1] == 1)
c = angles.cos()
s = angles.sin()
rot0 = torch.cat([c, -s], dim=1)
rot1 = torch.cat([s, c], dim=1)
rot = torch.stack([rot0, rot1], dim=1)
return rot, rot.transpose(1, 2)
def forward(self, input, weight, mask, temperature):
in_permutations, in_permutations_inverse = self.rotate_matrices(self.in_permutation_scores)
out_permutations, out_permutations_inverse = self.rotate_matrices(self.out_permutation_scores)
#in_permutations = self.permutation_mix(self.in_permutation_scores, self.in_permutations, temperature, self.training)
#out_permutations = self.permutation_mix(self.out_permutation_scores, self.out_permutations, temperature, self.training)
return self.permutated_linear(input,
self.in_mapping,
in_permutations,
in_permutations_inverse,
weight,
mask,
self.out_mapping,
self.out_mapping_reverse,
out_permutations,
out_permutations_inverse
)
@staticmethod
def permutation_mix(permutation_scores,
permutations,
temperature: float,
training: bool):
if training: # True
s = F.softmax(permutation_scores * temperature, dim=-1)
else:
s = torch.argmax(permutation_scores, dim=-1)
s = F.one_hot(s, num_classes=permutation_scores.shape[-1]).float()
s = s.matmul(permutations.reshape(permutations.shape[0], -1))
s = s.view(-1, *permutations.shape[1:])
return s
@staticmethod
def all_permutations(d_group):
t = torch.tensor(list(itertools.permutations(range(d_group))))
tp = t.sort(dim=1)[1]
a = torch.arange(t.shape[0]).unsqueeze(-1).expand_as(t)
c = torch.arange(d_group).unsqueeze(0).expand_as(t)
ones = torch.stack([a, c, t], dim=-1).reshape(-1, 3).t()
m = torch.zeros(t.shape[0], d_group, d_group)
m[tuple(ones)] = 1.0
ones = torch.stack([a, c, tp], dim=-1).reshape(-1, 3).t()
mp = torch.zeros(t.shape[0], d_group, d_group)
mp[tuple(ones)] = 1.0
return t, tp, m, mp
def random_permutation(iterable, r=None):
"Random selection from itertools.permutations(iterable, r)"
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(random.sample(pool, r))
@staticmethod
def dimension_mapping(d, testing=False):
while True:
m = torch.tensor(DimensionShuffler.random_permutation(range(d)))
if testing and (m == torch.arange(d)).all():
continue
return m
@staticmethod
def sequence_batch_group_permutation(s, mapping, permutations, final=False):
d_group = permutations.shape[-1]
d = s.shape[-1]
assert ((d % d_group) == 0)
assert (len(s.shape) == 3)
s_shape = s.shape
if not final:
s = s[:, :, mapping]
s = s.reshape(s.shape[:-1] + (s.shape[-1] // d_group, d_group))
s2 = torch.einsum('ijmk,mkn->ijmn', s, permutations)
s2 = s2.reshape(s_shape)
if final:
s2 = s2[:, :, mapping]
return s2
@staticmethod
def matrix_group_permutation_inverse(matrix, mapping, permutations, permutations_inverse, transposed=False):
d_group = permutations.shape[-1]
d = matrix.shape[-1]
assert ((d % d_group) == 0)
assert (len(matrix.shape) == 2)
matrix_shape = matrix.shape
matrix = matrix[:, mapping]
matrix = matrix.reshape(matrix.shape[0], matrix.shape[1] // d_group, d_group)
permutations_m = permutations_inverse
# mnk because matrix is transposed, we should transpose permutations_m too
perm_selector = "mkn" if transposed else "mnk"
matrix2 = torch.einsum(f'imk,{perm_selector}->imn', matrix, permutations_m)
matrix2 = matrix2.reshape(matrix_shape)
return matrix2
@staticmethod
def permutated_linear(s, in_map, in_permut, in_permut_inverse, matrix, mask, out_map, out_map_inverse, out_permut, out_permut_inverse):
s_in = DimensionShuffler.sequence_batch_group_permutation(s, in_map, in_permut)
matrix2 = DimensionShuffler.matrix_group_permutation_inverse(matrix, in_map, in_permut, in_permut_inverse)
matrix3 = DimensionShuffler.matrix_group_permutation_inverse(matrix2.t(), out_map, out_permut, out_permut_inverse, transposed=True)
matrix3 = matrix3 * mask.t()
s_inner = s_in.matmul(matrix3)
s_out = DimensionShuffler.sequence_batch_group_permutation(s_inner, out_map_inverse, out_permut, final=True)
return s_out
s_ref = s.matmul(matrix.t()) # REFERENCE
max_std = (s_out - s_ref).std().item()
max_diff = (s_out - s_ref).abs().max().item()
if max_diff > 0.1:
print("max difference", max_diff)
return s_out
class MaskDimensionShuffler(nn.Module):
def __init__(self, in_features, out_features, in_features_group=4, out_features_group=4):
super().__init__()
self.in_features = in_features
self.in_features_group = in_features_group
self.out_features = out_features
self.out_features_group = out_features_group
in_mapping = self.dimension_mapping(in_features)
in_mapping_reverse = in_mapping.sort()[1]
out_mapping = self.dimension_mapping(out_features)
out_mapping_reverse = out_mapping.sort()[1]
self.register_buffer("in_mapping", in_mapping)
self.register_buffer("in_mapping_reverse", in_mapping_reverse)
self.register_buffer("out_mapping", out_mapping)
self.register_buffer("out_mapping_reverse", out_mapping_reverse)
if in_features_group == 2:
score_dim = 1
else:
# Currently not supported
assert (False)
in_permutation_scores = torch.randn(in_features // in_features_group, score_dim)
out_permutation_scores = torch.randn(out_features // out_features_group, score_dim)
self.in_permutation_scores = nn.Parameter(in_permutation_scores)
self.out_permutation_scores = nn.Parameter(out_permutation_scores)
@staticmethod
def random_permutation(iterable, r=None):
"Random selection from itertools.permutations(iterable, r)"
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(random.sample(pool, r))
@staticmethod
def dimension_mapping(d):
return torch.tensor(MaskDimensionShuffler.random_permutation(range(d)))
@staticmethod
def rotations_2d(angles):
c = angles.cos()
s = angles.sin()
rot0 = torch.stack([c, -s], dim=-1)
rot1 = torch.stack([s, c], dim=-1)
rot = torch.stack([rot0, rot1], dim=1)
return rot
@staticmethod
def angles(scores, temperature, training):
scores_0 = torch.zeros_like(scores)
scores = torch.stack([scores_0, scores], dim=-1)
if training:
s = F.softmax(scores * temperature, dim=-1)
else:
s = torch.argmax(scores, dim=-1)
s = F.one_hot(s, num_classes=scores.shape[-1]).float()
angles = s[:, :, 1] * (math.pi * 0.5)
return angles
@staticmethod
def matrices(angles):
if angles.shape[-1] == 1:
return MaskDimensionShuffler.rotations_2d(angles.squeeze(-1))
else:
assert(False)
@staticmethod
def rotate(mask, mapping, mapping_reverse, scores, temperature, training):
# Rotate each group of n lines
mask_shape = mask.shape
# Get the rotations angles
angles0 = MaskDimensionShuffler.angles(scores, temperature, training)
mat0 = MaskDimensionShuffler.matrices(angles0)
        # The mixing factors are the squares of the coefficients of the rotation matrix
mat0 = mat0 * mat0
# Apply the global random dimension remapping
if mapping is not None:
mask = StaticIndexDim1.apply(mask, mapping, mapping_reverse)
# Create the groups of dimensions
rot_dim = mat0.shape[-1]
mask = mask.view(mask_shape[0], mask_shape[1] // rot_dim, rot_dim)
        # The following lines rotate each group; an einsum would also work, but the explicit version is simple enough
# Adapt the mask to the shape of matrices by repeating the last dimension
mask = mask.unsqueeze(-1).repeat(1, 1, 1, rot_dim)
# Adapt the matrices to the shape of the mask
mat0 = mat0.unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
# Finish with the sum on the right dimension
mask = (mat0 * mask).sum(-2)
# Reshape the mask to remove the temporary grouping
return mask.view(mask_shape)
@staticmethod
def final_mapping(mapping, scores):
mapping = mapping.view(1, mapping.shape[0])
mapping = MaskDimensionShuffler.rotate(mapping, None, None, scores, 0, False)
mapping = (mapping.round() + 0.25).long().squeeze(0)
return mapping
def final_mappings(self):
# Those are the mappings that should be applied to the weights
# (and so inverted)
m0 = self.final_mapping(self.in_mapping, self.in_permutation_scores)
m0_p = m0.sort()[1]
m1 = self.final_mapping(self.out_mapping, self.out_permutation_scores)
m1_p = m1.sort()[1]
return m0, m0_p, m1, m1_p
def forward(self, mask, temperature):
training = self.training
mask = self.rotate(mask, self.in_mapping, self.in_mapping_reverse, self.in_permutation_scores, temperature, training)
mask = mask.t()
mask = self.rotate(mask, self.out_mapping, self.out_mapping_reverse, self.out_permutation_scores, temperature, training)
mask = mask.t()
return mask
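# Illustrative usage sketch (added for clarity, not part of the original module): driving the
# shuffler above on a weight-shaped mask. `_demo_mask_shuffler` and its argument values are
# assumptions for illustration only; the sketch relies on this file's existing imports and on the
# StaticIndexDim1 helper behaving as it is used in `rotate` above.
def _demo_mask_shuffler():
    shuffler = MaskDimensionShuffler(in_features=8, out_features=8,
                                     in_features_group=2, out_features_group=2)
    mask = (torch.rand(8, 8) > 0.5).float()  # same shape as an nn.Linear weight: (out, in)
    # temperature sharpens the softmax over permutation scores (see `angles` above)
    shuffled = shuffler(mask, temperature=1.0)
    return shuffled.shape  # torch.Size([8, 8])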
class MaskedLinear(nn.Linear):
"""
    Fully connected layer with an on-the-fly adaptive mask.
If needed, a score matrix is created to store the importance of each associated weight.
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
mask_init: str = "constant",
mask_scale: float = 0.0,
pruning_method: str = "topK",
mask_block_rows:int = 1,
mask_block_cols:int = 1,
ampere_pruning_method: str = "disabled",
ampere_mask_init: str = "constant",
ampere_mask_scale: float = 0.0,
shuffling_method:str = "disabled",
in_shuffling_group:int = 4,
out_shuffling_group:int = 4,
):
"""
Args:
in_features (`int`)
Size of each input sample
out_features (`int`)
Size of each output sample
bias (`bool`)
If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
mask_init (`str`)
The initialization method for the score matrix if a score matrix is needed.
Choices: ["constant", "uniform", "kaiming"]
Default: ``constant``
mask_scale (`float`)
The initialization parameter for the chosen initialization method `mask_init`.
Default: ``0.``
pruning_method (`str`)
Method to compute the mask.
Choices: ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"]
Default: ``topK``
"""
super().__init__(in_features=in_features, out_features=out_features, bias=bias)
assert pruning_method in ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"]
self.pruning_method = pruning_method
self.mask_block_rows = mask_block_rows
self.mask_block_cols = mask_block_cols
AMPERE_METHODS = ["disabled", "annealing"]
if ampere_pruning_method not in AMPERE_METHODS:
raise RuntimeError(f"Unknown ampere pruning method '{ampere_pruning_method}', should be in {AMPERE_METHODS}")
self.ampere_pruning_method = ampere_pruning_method
SHUFFLING_METHODS = ["disabled", "annealing", "mask_annealing"]
if shuffling_method not in SHUFFLING_METHODS:
raise RuntimeError(f"Unknown shuffle method '{shuffling_method}', should be in {SHUFFLING_METHODS}")
self.shuffling_method = shuffling_method
assert in_shuffling_group >= 1
self.in_shuffling_group = in_shuffling_group
assert out_shuffling_group >= 1
self.out_shuffling_group = out_shuffling_group
self.shuffler = None
self.mask_shuffler = None
if self.shuffling_method == "annealing":
self.shuffler = DimensionShuffler(in_features=in_features,
out_features=out_features,
in_features_group=self.in_shuffling_group,
out_features_group=self.out_shuffling_group)
elif self.shuffling_method == "mask_annealing":
self.mask_shuffler = MaskDimensionShuffler(in_features=in_features,
out_features=out_features,
in_features_group=self.in_shuffling_group,
out_features_group=self.out_shuffling_group)
if self.pruning_method in ["topK", "threshold", "sigmoied_threshold", "l0"]:
self.mask_scale = mask_scale
self.mask_init = mask_init
size = self.weight.size()
assert(size[0] % self.mask_block_rows == 0)
assert(size[1] % self.mask_block_cols == 0)
mask_size = (size[0] // self.mask_block_rows, size[1] // self.mask_block_cols)
self.mask_scores = nn.Parameter(torch.Tensor(size=mask_size))
self.init_mask()
if self.ampere_pruning_method == "annealing":
self.ampere_mask_init = ampere_mask_init
self.ampere_mask_scale = ampere_mask_scale
self.initialize_ampere_permut_scores()
else:
self.register_parameter("ampere_permut_scores", None)
def initialize_ampere_permut_scores(self):
""""We must remember that weights are used in transposed form for forward pass,
which we want to optimize the most.
So we make sure we are creating an Ampere sparse pattern on the right dimension -> 0"""
assert ((self.weight.shape[0] % AMPERE_M) == 0)
sparse_patterns_count = ampere_pattern(None).shape[0]
# Creating the pattern in a transposed way to avoid a few ops later
ampere_mask_size = (self.weight.shape[1], self.weight.shape[0] // AMPERE_M, sparse_patterns_count)
self.ampere_permut_scores = nn.Parameter(torch.Tensor(size=ampere_mask_size))
if self.ampere_mask_init == "constant":
init.constant_(self.ampere_permut_scores, val=self.ampere_mask_scale)
elif self.ampere_mask_init == "uniform":
init.uniform_(self.ampere_permut_scores, a=-self.ampere_mask_scale, b=self.ampere_mask_scale)
elif self.ampere_mask_init == "kaiming":
init.kaiming_uniform_(self.ampere_permut_scores, a=math.sqrt(5))
def init_mask(self):
if self.mask_init == "constant":
init.constant_(self.mask_scores, val=self.mask_scale)
elif self.mask_init == "uniform":
init.uniform_(self.mask_scores, a=-self.mask_scale, b=self.mask_scale)
elif self.mask_init == "kaiming":
init.kaiming_uniform_(self.mask_scores, a=math.sqrt(5))
@staticmethod
def expand_mask_(mask, mask_block_rows, mask_block_cols):
mask = torch.repeat_interleave(mask, mask_block_rows, dim=0)
mask = torch.repeat_interleave(mask, mask_block_cols, dim=1)
return mask
@staticmethod
def ampere_mask_(ampere_permut_scores,
ampere_temperature: float,
device:torch.DeviceObjType,
training:bool):
if training:
s = F.softmax(ampere_permut_scores * ampere_temperature, dim=-1)
else:
s = torch.argmax(ampere_permut_scores, dim=-1)
s = F.one_hot(s, num_classes=ampere_permut_scores.shape[-1]).float()
s = s.matmul(ampere_pattern(device))
s = s.view(-1, s.shape[1] * s.shape[2])
s = s.t()
return s
@staticmethod
def check_name(name):
return name.endswith(".ampere_permut_scores") or name.endswith(".mask_scores")
@staticmethod
def mask_(weight,
pruning_method,
threshold,
mask_scores,
ampere_pruning_method,
ampere_temperature,
ampere_permut_scores,
mask_block_rows,
mask_block_cols,
training):
if pruning_method == "topK":
mask = TopKBinarizer.apply(mask_scores, threshold)
elif pruning_method in ["threshold", "sigmoied_threshold"]:
sig = "sigmoied" in pruning_method
mask = ThresholdBinarizer.apply(mask_scores, threshold, sig)
elif pruning_method == "magnitude":
mask = MagnitudeBinarizer.apply(weight, threshold)
elif pruning_method == "l0":
l, r, b = -0.1, 1.1, 2 / 3
if training:
u = torch.zeros_like(mask_scores).uniform_().clamp(0.0001, 0.9999)
s = torch.sigmoid((u.log() - (1 - u).log() + mask_scores) / b)
else:
s = torch.sigmoid(mask_scores)
s_bar = s * (r - l) + l
mask = s_bar.clamp(min=0.0, max=1.0)
# Expand block mask to individual element mask
if pruning_method != "magnitude":
mask = MaskedLinear.expand_mask_(mask,
mask_block_rows=mask_block_rows,
mask_block_cols=mask_block_cols
)
if ampere_pruning_method != "disabled":
ampere_mask = MaskedLinear.ampere_mask_(ampere_permut_scores,
ampere_temperature,
device=mask.device,
training=training)
mask = mask * ampere_mask
return mask
@staticmethod
def masked_weights_from_state_dict(state_dict,
weight_name,
pruning_method,
threshold,
ampere_pruning_method,
mask_block_rows,
mask_block_cols):
def name_for_mask(weight_name, mask_name):
            new_name = weight_name.split(".")[:-1] + [mask_name]
            new_name = ".".join(new_name)
            return new_name
parameters = {}
for name in ["weight", "mask_scores", "ampere_permut_scores"]:
parameters[name] = state_dict.get(name_for_mask(weight_name, name))
ret = MaskedLinear.masked_weights(pruning_method=pruning_method,
threshold=threshold,
ampere_pruning_method=ampere_pruning_method,
ampere_temperature=0.0,
training=False,
mask_block_rows=mask_block_rows,
mask_block_cols=mask_block_cols,
**parameters)
return ret
def expand_mask(self, mask):
return self.expand_mask_(mask, self.mask_block_rows, self.mask_block_cols)
    def forward(self, input: torch.Tensor, current_config: dict):
# Get the mask
threshold = current_config["threshold"]
ampere_temperature = current_config["ampere_temperature"]
shuffle_temperature = current_config["shuffling_temperature"]
mask = self.mask_(self.weight,
self.pruning_method,
threshold,
self.mask_scores,
self.ampere_pruning_method,
ampere_temperature,
self.ampere_permut_scores,
self.mask_block_rows,
self.mask_block_cols,
training=self.training)
if self.shuffler is not None:
return self.shuffler(input, self.weight, mask, shuffle_temperature) + self.bias
else:
if self.mask_shuffler is not None:
mask = self.mask_shuffler(mask, shuffle_temperature)
weight_thresholded = mask * self.weight
# Compute output (linear layer) with masked weights
return F.linear(input, weight_thresholded, self.bias)
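# Illustrative usage sketch (added for clarity, not part of the original module): a minimal
# end-to-end call of MaskedLinear with topK pruning. `_demo_masked_linear` and the
# `current_config` values below are assumptions for illustration only, not the schedule used
# by the training scripts in this repository.
def _demo_masked_linear():
    layer = MaskedLinear(in_features=16, out_features=8,
                         pruning_method="topK",
                         mask_block_rows=4, mask_block_cols=4)
    x = torch.randn(2, 16)
    # With topK, `threshold` is the fraction of mask blocks kept by TopKBinarizer; the other two
    # entries are only used when ampere pruning or shuffling is enabled.
    config = {"threshold": 0.5, "ampere_temperature": 0.0, "shuffling_temperature": 0.0}
    return layer(x, config).shape  # torch.Size([2, 8])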
| 0 |
0 | hf_public_repos/block_movement_pruning/block_movement_pruning | hf_public_repos/block_movement_pruning/block_movement_pruning/model_card/pruning.svg | [Plotly SVG bar chart "Pruned Transformer Heads": x-axis "Layer index" (0-11), y-axis "Heads count", stacked bars per layer of "active" (blue) and "pruned" (pink) heads. Active heads per layer: 4, 3, 4, 6, 4, 4, 5, 7, 4, 5, 5, 3; pruned heads per layer: 8, 9, 8, 6, 8, 8, 7, 5, 8, 7, 7, 9 (12 heads per layer).] | 1
0 | hf_public_repos | hf_public_repos/awesome-papers/README.md | # Awesome NLP Paper Discussions
The Hugging Face team believes that we can reach our goals in NLP by building powerful open source tools and by conducting impactful research. Our team has begun holding regular internal discussions about awesome papers and research areas in NLP. In the spirit of open science, we've decided to share these discussion materials with the community.
_Note: These science day discussions are held offline, so there is no recorded presentation or discussion to share. However, some presentation materials do include limited comments from our team or summaries of internal discussions._
See [planned future discussions](#planned-discussions) below.
#### August 12, 2020
- **Paper**: [Pre-training via Paraphrasing](https://arxiv.org/abs/2006.15020)
- **Authors**: [Mike Lewis](https://twitter.com/ml_perception), [Marjan Ghazvininejad](https://twitter.com/gh_marjan), [Gargi Ghosh](https://twitter.com/gargighosh), Armen Aghajanyan, [Sida Wang](https://twitter.com/sidawxyz), [Luke Zettlemoyer](https://twitter.com/lukezettlemoyer)
- **Presenter**: [Sam Shleifer](https://twitter.com/sam_shleifer)
- **Presentation**: [Forum Summary](https://discuss.huggingface.co/t/science-tuesday-marge/685)
- **[Community Discussion](https://discuss.huggingface.co/t/science-tuesday-marge/685)**
<img src="images/marge.png" width="600pt">
#### June 23, 2020
- **Paper**: [Weight Poisoning Attacks on Pre-trained Models](https://arxiv.org/abs/2004.06660)
- **Authors**: Keita Kurita, [Paul Michel](https://twitter.com/pmichelX), [Graham Neubig](https://twitter.com/gneubig)
- **Presenter**: [Joe Davison](https://twitter.com/joeddav)
- **Presentation**: [Colab notebook/post](https://colab.research.google.com/drive/1BzdevUCFUSs_8z_rIP47VyKAlvfK1cCB?usp=sharing)
- **[Community Discussion](https://github.com/huggingface/awesome-papers/discussions/8)**
<img src="images/evil_bert.png" width="600pt">
#### June 18, 2020
- **Paper**: [Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768)
- **Authors**: [Sinong Wang](https://twitter.com/sinongwang), [Belinda Li](https://twitter.com/belindazli), Madian Khabsa, [Han Fang](https://twitter.com/Han_Fang_), Hao Ma
- **Presenter**: [Teven Le Scao](https://twitter.com/Fluke_Ellington)
- **Presentation**: [Tutorial Blog Post](https://tevenlescao.github.io/blog/fastpages/jupyter/2020/06/18/JL-Lemma-+-Linformer.html)
- **[Community Discussion](https://github.com/huggingface/awesome-papers/discussions/7)**
<img src="images/linformer.png" width="600pt">
#### June 9, 2020
- **Paper**: [Evaluating NLP Models via Contrast Sets](https://arxiv.org/abs/2004.02709)
- **Authors**: [Matt Gardner](https://twitter.com/nlpmattg), [Yoav Artzi](https://twitter.com/yoavartzi), Victoria Basmova, [Jonathan Berant](https://twitter.com/JonathanBerant), [Ben Bogin](https://twitter.com/ben_bogin), [Sihao Chen](https://twitter.com/soshsihao), [Pradeep Dasigi](https://twitter.com/pdasigi), [Dheeru Dua](https://twitter.com/ddua17), [Yanai Elazar](https://twitter.com/yanaiela), Ananth Gottumukkala, [Nitish Gupta](https://twitter.com/yanaiela), [Hanna Hajishirzi](https://twitter.com/HannaHajishirzi), [Gabriel Ilharco](https://twitter.com/gabriel_ilharco), [Daniel Khashabi](https://twitter.com/DanielKhashabi), [Kevin Lin](https://twitter.com/nlpkevinl), Jiangming Liu, [Nelson F. Liu](https://twitter.com/nelsonfliu), Phoebe Mulcaire, [Qiang Ning](https://twitter.com/qiangning), [Sameer Singh](https://twitter.com/sameer_), [Noah A. Smith](https://twitter.com/nlpnoah), [Sanjay Subramanian](https://twitter.com/sanjayssub), [Reut Tsarfaty](https://twitter.com/rtsarfaty), [Eric Wallace](https://twitter.com/Eric_Wallace_), Ally Zhang, [Ben Zhou](https://twitter.com/BenZhou96)
- **Presenter**: [Victor Sanh](https://twitter.com/SanhEstPasMoi)
- **Presentation**: [Slides](https://docs.google.com/presentation/d/1DfA2xi0JBSbqQ0hJrhI0jzANwjSaxV7odOA73lPfHjo/edit?usp=sharing)
<img src="images/contrast_sets.png" width="300pt">
#### May 18, 2020
- **Paper**: [Movement Pruning: Adaptive Sparsity by Fine-Tuning](https://arxiv.org/abs/2005.07683)
- **Authors**: [Victor Sanh](https://twitter.com/SanhEstPasMoi), [Thomas Wolf](https://twitter.com/Thom_Wolf), [Alexander M. Rush](https://twitter.com/srush_nlp)
- **Presenter**: [Victor Sanh](https://twitter.com/SanhEstPasMoi)
- **Presentation**: [Slideshare](https://www.slideshare.net/VictorSanh/movement-pruning-explain-like-im-five-234205241)
<img src="images/movement.png" width="600pt">
#### May 5, 2020
- **Paper**: [Von Mises-Fisher Loss for Training Sequence to Sequence Models with Continuous Outputs](https://arxiv.org/abs/1812.04616)
- **Authors**: [Sachin Kumar](https://twitter.com/shocheen), Yulia Tsvetkov
- **Presenter**: [Victor Sanh](https://twitter.com/SanhEstPasMoi)
- **Presentation**: [Colab notebook](https://colab.research.google.com/drive/1040xlv5WkLo_Xli0FpA2_bxyfsMouZ-w)
<img src="images/vmfvscos.png" width="600pt">
#### April 22, 2020
- **Topic**: Transfer Learning in Natural Language Processing (NLP): Open questions, current trends, limits, and future directions
- **Presenter**: [Thomas Wolf](https://twitter.com/Thom_Wolf)
- **Presentation**: [Video](https://www.youtube.com/watch?v=G5lmya6eKtc)
<img src="images/transfer-learning.png" width="600pt">
#### April 7, 2020
- **Topic**: Overview of recent work on: Indexing and Retrieval for Open Domain Question Answering
- **Presenter**: [Yacine Jernite](https://twitter.com/YJernite)
- **Presentation**: [Slides](https://docs.google.com/presentation/d/1A5wJEzFYGdNem7egJ-BTm6EMI3jGNe1lalyChYL54gw)
<img src="images/denspi.png" width="600pt">
#### March 24, 2020
- **Paper**: [Scaling Laws for Neural Language Models](https://arxiv.org/abs/2001.08361)
- **Authors**: Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, [Scott Gray](https://twitter.com/scottgray76), [Alec Radford](https://twitter.com/AlecRad), Jeffrey Wu, Dario Amodei
- **Presenter**: [Teven Le Scao](https://twitter.com/Fluke_Ellington)
- **Presentation**: [Google doc paper tutorial](https://docs.google.com/document/d/1Rye61octaEF6FPHN3E7Bn2s-W3AWgMi1hukxrbkBmgY/edit#heading=h.s0a83j1o76km)
<img src="images/scaling_laws.png" width="600pt">
#### March 17, 2020
- **Paper**: [Representation Learning with Contrastive Predictive Coding](https://arxiv.org/abs/1807.03748)
- **Authors**: [Aaron van den Oord](https://twitter.com/avdnoord), Yazhe Li, Oriol Vinyals
- **Presenter**: [Patrick von Platen](https://twitter.com/PatrickPlaten)
- **Presentation**: [Slides](https://docs.google.com/presentation/d/1qxt7otjFI8iQSCpwzwTNei4_n4e4CIczC6nwy3jdiJY/edit?usp=sharing)
<img src="images/cpc.png" width="500pt">
#### March 10, 2020
- **Paper**: [Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural Language Inference](https://arxiv.org/abs/1902.01007)
- **Authors**: [R. Thomas McCoy](https://twitter.com/RTomMcCoy), Ellie Pavlick, [Tal Linzen](https://twitter.com/tallinzen)
- **Presenter**: [Victor Sanh](https://twitter.com/SanhEstPasMoi)
- **Presentation**: [Slides](https://docs.google.com/presentation/d/15waw0-rr4RmPx0dhEzhNhkSiFnNqhvjm66IufWbRLyw/edit?usp=sharing)
<img src="images/hans.png" width="500pt">
#### March 3, 2020
- **Paper**: [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909)
- **Authors**: [Kelvin Guu](https://twitter.com/kelvin_guu), [Kenton Lee](https://twitter.com/kentonctlee), Zora Tung, [Panupong Pasupat](https://twitter.com/IcePasupat), [Ming-Wei Chang](https://twitter.com/mchang21)
- **Presenter**: [Joe Davison](https://twitter.com/joeddav)
- **Presentation**: [Write-up](https://joeddav.github.io/blog/2020/03/03/REALM.html)
<img src="images/realm.png" width="400pt">
#### February 25, 2020
- **Paper**: [Adaptively Sparse Transformers](https://arxiv.org/abs/1909.00015)
- **Authors**: Gonçalo M. Correia, [Vlad Niculae](https://twitter.com/vnfrombucharest), André F.T. Martins
- **Presenter**: [Sasha Rush](https://twitter.com/srush_nlp)
- **Presentation**: [Colab notebook](https://colab.research.google.com/drive/1EB7MI_3gzAR1gFwPPO27YU9uYzE_odSu)
<img src="images/sparse.png" width="600pt">
### Planned Discussions
No planned discussions for the moment; check back soon.
| 2 |
0 | hf_public_repos | hf_public_repos/accelerate/setup.py | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
extras = {}
extras["quality"] = [
"black ~= 23.1", # hf-doc-builder has a hidden dependency on `black`
"hf-doc-builder >= 0.3.0",
"ruff ~= 0.6.4",
]
extras["docs"] = []
extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"]
extras["test_dev"] = [
"datasets",
"diffusers",
"evaluate",
"torchdata>=0.8.0",
"torchpippy>=0.2.0",
"transformers",
"scipy",
"scikit-learn",
"tqdm",
"bitsandbytes",
"timm",
]
extras["testing"] = extras["test_prod"] + extras["test_dev"]
extras["deepspeed"] = ["deepspeed"]
extras["rich"] = ["rich"]
extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"]
extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"]
extras["sagemaker"] = [
"sagemaker", # boto3 is a required package in sagemaker
]
setup(
name="accelerate",
version="1.2.0.dev0",
description="Accelerate",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="deep learning",
license="Apache",
author="The HuggingFace team",
author_email="[email protected]",
url="https://github.com/huggingface/accelerate",
package_dir={"": "src"},
packages=find_packages("src"),
entry_points={
"console_scripts": [
"accelerate=accelerate.commands.accelerate_cli:main",
"accelerate-config=accelerate.commands.config:main",
"accelerate-estimate-memory=accelerate.commands.estimate:main",
"accelerate-launch=accelerate.commands.launch:main",
"accelerate-merge-weights=accelerate.commands.merge:main",
]
},
python_requires=">=3.9.0",
install_requires=[
"numpy>=1.17,<3.0.0",
"packaging>=20.0",
"psutil",
"pyyaml",
"torch>=1.10.0",
"huggingface_hub>=0.21.0",
"safetensors>=0.4.3",
],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
# Release checklist
# 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one):
# git checkout -b vXX.xx-release
# The -b is only necessary for creation (so remove it when doing a patch)
# 2. Change the version in __init__.py and setup.py to the proper value.
# 3. Commit these changes with the message: "Release: v<VERSION>"
# 4. Add a tag in git to mark the release:
# git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi'
# Push the tag and release commit to git: git push --tags origin vXX.xx-release
# 5. Run the following commands in the top-level directory:
# rm -rf dist
# rm -rf build
# python setup.py bdist_wheel
# python setup.py sdist
# 6. Upload the package to the pypi test server first:
# twine upload dist/* -r testpypi
# 7. Check that you can install it in a virtualenv by running:
# pip install accelerate
# pip uninstall accelerate
# pip install -i https://testpypi.python.org/pypi accelerate
# accelerate env
# accelerate test
# 8. Upload the final version to actual pypi:
# twine upload dist/* -r pypi
# 9. Add release notes to the tag in github once everything is looking hunky-dory.
# 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to
# main.
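# Illustrative note (added commentary): the `extras` groups defined above map to optional installs,
# for example:
#   pip install accelerate[testing]   # test_prod + test_dev dependencies
#   pip install accelerate[dev]       # quality + testing + rich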
| 3 |
0 | hf_public_repos | hf_public_repos/accelerate/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 4 |
0 | hf_public_repos | hf_public_repos/accelerate/Makefile | .PHONY: quality style test docs utils
check_dirs := .
# Check that source code meets quality standards
extra_quality_checks:
python utils/check_copies.py
python utils/check_dummies.py
python utils/check_repo.py
doc-builder style src/accelerate docs/source --max_len 119
# this target runs checks on all files
quality:
ruff check $(check_dirs)
ruff format --check $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only
# Format source code automatically and check if there are any problems left that need manual fixing
style:
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119
# Run tests for the library
test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)
test_core:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \
--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)
test_cli:
python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",)
test_deepspeed:
python -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_deepspeed.log",)
test_fsdp:
python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)
# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
$(MAKE) test_core
$(MAKE) test_cli
$(MAKE) test_big_modeling
$(MAKE) test_deepspeed
$(MAKE) test_fsdp
test_examples:
python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)
# Broken down example tests for the CI runners
test_integrations:
python -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)
test_example_differences:
python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",)
test_checkpoint_epoch:
python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_epoch.log",)
test_checkpoint_step:
python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_step" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_step.log",)
# Same as test but used to install only the base dependencies
test_prod:
$(MAKE) test_core
test_rest:
python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",)
| 5 |
0 | hf_public_repos | hf_public_repos/accelerate/CONTRIBUTING.md | <!---
Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# How to contribute to 🤗 Accelerate?
Everyone is welcome to contribute, and we value everybody's contribution. Code
is thus not the only way to help the community. Answering questions, helping
others, reaching out and improving the documentation are immensely valuable to
the community.
It also helps us if you spread the word: reference the library from blog posts
on the awesome projects it made possible, shout out on Twitter every time it has
helped you, or simply star the repo to say "thank you".
Whichever way you choose to contribute, please be mindful to respect our
[code of conduct](https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md).
## You can contribute in so many ways!
Some of the ways you can contribute to Accelerate:
* Fixing outstanding issues with the existing code;
* Contributing to the examples or to the documentation;
* Submitting issues related to bugs or desired new features.
## Submitting a new issue or feature request
Do your best to follow these guidelines when submitting an issue or a feature
request. It will make it easier for us to come back to you quickly and with good
feedback.
### Did you find a bug?
The 🤗 Accelerate library is robust and reliable thanks to the users who notify us of
the problems they encounter. So thank you for reporting an issue.
First, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on Github under Issues).
Did not find it? :( So we can act quickly on it, please follow these steps:
* Include your **OS type and version**, the versions of **Python** and **PyTorch**.
* A short, self-contained, code snippet that allows us to reproduce the bug in
less than 30s;
* Provide us with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_config.yaml`)
### Do you want a new feature?
A good feature request addresses the following points:
1. Motivation first:
* Is it related to a problem/frustration with the library? If so, please explain
why. Providing a code snippet that demonstrates the problem is best.
* Is it related to something you would need for a project? We'd love to hear
about it!
* Is it something you worked on and think could benefit the community?
Awesome! Tell us what problem it solved for you.
2. Write a *full paragraph* describing the feature;
3. Provide a **code snippet** that demonstrates its future use;
4. In case this is related to a paper, please attach a link;
5. Attach any additional information (drawings, screenshots, etc.) you think may help.
If your issue is well written we're already 80% of the way there by the time you
post it.
## Submitting a pull request (PR)
Before writing code, we strongly advise you to search through the existing PRs or
issues to make sure that nobody is already working on the same thing. If you are
unsure, it is always a good idea to open an issue to get some feedback.
You will need basic `git` proficiency to be able to contribute to
🤗 Accelerate. `git` is not the easiest tool to use but it has the greatest
manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.
Follow these steps to start contributing:
1. Fork the [repository](https://github.com/huggingface/accelerate) by
clicking on the 'Fork' button on the repository's page. This creates a copy of the code
under your GitHub user account.
2. Clone your fork to your local disk, and add the base repository as a remote. The following command
assumes you have your public SSH key uploaded to GitHub. See the following guide for more
[information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).
```bash
$ git clone [email protected]:<your Github handle>/accelerate.git
$ cd accelerate
$ git remote add upstream https://github.com/huggingface/accelerate.git
```
3. Create a new branch to hold your development changes, and do this for every new PR you work on.
Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):
```bash
$ git checkout main
$ git fetch upstream
$ git merge upstream/main
```
Once your `main` branch is synchronized, create a new branch from it:
```bash
$ git checkout -b a-descriptive-name-for-my-changes
```
**Do not** work on the `main` branch.
4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library:
```bash
$ pip install -e ".[dev]"
```
This will install all testing and linting/code quality dependencies for the library (see `quality`, `test_dev`,
`test_prod` targets in [`setup.py`](./setup.py)).
(If accelerate was already installed in the virtual environment, remove
it with `pip uninstall accelerate` before reinstalling it in editable
mode with the `-e` flag).
Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using
the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers).
5. Develop the features on your branch.
As you work on the features, you should make sure that the test suite
passes. You should run the tests impacted by your changes like this (see
below an explanation regarding the environment variable):
```bash
$ pytest tests/<TEST_TO_RUN>.py
```
> For the following commands leveraging the `make` utility, we recommend using the WSL system when running on
> Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about).
You can also run the full suite with the following command.
```bash
$ make test
```
`accelerate` relies on `ruff` to format its source code consistently. After you make changes, you can apply the automatic style corrections and the code verifications that can't be automated in one go with the `make style` target, which is also optimized to only work with files modified by the PR you're working on:
```bash
$ make style
```
`accelerate` also uses a few custom scripts to check for coding mistakes. Quality control runs in CI; you can also run the same checks locally with:
```bash
$ make quality
```
You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks
automatically as Git commit hooks.
```bash
$ pip install pre-commit
$ pre-commit install
```
Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:
```bash
$ git add modified_file.py
$ git commit
```
Please write [good commit messages](https://chris.beams.io/posts/git-commit/).
It is a good idea to sync your copy of the code with the original
repository regularly. This way you can quickly account for changes:
```bash
$ git fetch upstream
$ git rebase upstream/main
```
Push the changes to your account using:
```bash
$ git push -u origin a-descriptive-name-for-my-changes
```
6. Once you are satisfied (**and the checklist below is happy too**), go to the
webpage of your fork on GitHub. Click on 'Pull request' to send your changes
to the project maintainers for review.
7. It's ok if maintainers ask you for changes. It happens to core contributors
too! So that everyone can see the changes in the pull request, work in your local
branch and push the changes to your fork. They will automatically appear in
the pull request.
### Checklist
1. The title of your pull request should be a summary of its contribution;
2. If your pull request addresses an issue, please mention the issue number in
the pull request description to make sure they are linked (and people
consulting the issue know you are working on it);
3. To indicate a work in progress please prefix the title with `[WIP]`, or mark
the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate
it from PRs ready to be merged;
4. Make sure existing tests pass;
5. Add high-coverage tests. No quality testing = no merge.
See an example of a good PR here: https://github.com/huggingface/accelerate/pull/255
### Tests
An extensive test suite is included to test the library behavior and several examples. Library tests can be found in
the [tests folder](https://github.com/huggingface/accelerate/tree/main/tests).
We use `pytest` in order to run the tests. From the root of the
repository, here's how to run tests with `pytest` for the library:
```bash
$ python -m pytest -sv ./tests
```
In fact, that's how `make test` is implemented (sans the `pip install` line)!
You can specify a smaller set of tests in order to test only the feature
you're working on.
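For example, you can run only the tests in a single file, or only those whose names match a keyword, by passing a path or a `-k` expression to `pytest` (the file name and keyword below are placeholders, pick the ones relevant to your change):
```bash
$ python -m pytest -sv ./tests/test_cli.py
$ python -m pytest -sv ./tests -k "checkpoint"
```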
| 6 |
0 | hf_public_repos | hf_public_repos/accelerate/CODE_OF_CONDUCT.md |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| 7 |
0 | hf_public_repos | hf_public_repos/accelerate/pyproject.toml | [tool.ruff]
line-length = 119
target-version = "py38"
[tool.ruff.lint]
preview = true
extend-select = [
"B009", # static getattr
"B010", # static setattr
"CPY", # Copyright
"E", # PEP8 errors
"F", # PEP8 formatting
"I", # Import sorting
"TID251", # Banned API
"UP", # Pyupgrade
"W", # PEP8 warnings
]
ignore = [
"E501", # Line length (handled by ruff-format)
"E741", # Ambiguous variable name
"W605", # Invalid escape sequence
"UP007", # X | Y type annotations
]
[tool.ruff.lint.per-file-ignores]
"__init__.py" = [
"F401", # Ignore seemingly unused imports (they're meant for re-export)
]
"manim_animations/*" = ["ALL"]
[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["accelerate"]
[tool.ruff.format]
exclude = [
"manim_animations/*"
]
[tool.ruff.lint.flake8-tidy-imports.banned-api]
"os.getenv".msg = "Use os.environ instead"
"os.putenv".msg = "Use os.environ instead"
"os.unsetenv".msg = "Use os.environ instead"
| 8 |
0 | hf_public_repos | hf_public_repos/accelerate/.pre-commit-config.yaml | repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.2.1
hooks:
- id: ruff
args:
- --fix
- id: ruff-format
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-merge-conflict
- id: check-yaml
| 9 |
0 | hf_public_repos/api-inference-community/docker_images/fairseq | hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
def test_can_build_docker_image(self):
with cd(os.path.dirname(os.path.dirname(__file__))):
subprocess.check_output(["docker", "build", "."])
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/fairseq | hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_api_audio_to_audio.py | import base64
import json
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"audio-to-audio" not in ALLOWED_TASKS,
"audio-to-audio not implemented",
)
class AudioToAudioTestCase(TestCase):
def setUp(self):
model_id = TESTABLE_MODELS["audio-to-audio"]
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = model_id
os.environ["TASK"] = "audio-to-audio"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_simple(self):
bpayload = self.read("sample2.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "application/json")
audio = json.loads(response.content)
self.assertTrue(isinstance(audio, list))
self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"})
data = base64.b64decode(audio[0]["blob"])
wavform = ffmpeg_read(data, 16000)
self.assertGreater(wavform.shape[0], 1000)
self.assertTrue(isinstance(audio[0]["content-type"], str))
self.assertTrue(isinstance(audio[0]["label"], str))
def test_malformed_audio(self):
bpayload = self.read("malformed.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
| 1 |
0 | hf_public_repos/api-inference-community/docker_images/fairseq | hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_api.py | import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
"text-to-speech": "facebook/fastspeech2-en-ljspeech",
"audio-to-audio": "facebook/xm_transformer_sm_all-en",
}
ALL_TASKS = {
"text-to-speech",
"audio-to-audio",
}
class PipelineTestCase(TestCase):
@skipIf(
os.path.dirname(os.path.dirname(__file__)).endswith("common"),
"common is a special case",
)
def test_has_at_least_one_task_enabled(self):
self.assertGreater(
len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
)
def test_unsupported_tasks(self):
unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
for unsupported_task in unsupported_tasks:
with self.subTest(msg=unsupported_task, task=unsupported_task):
os.environ["TASK"] = unsupported_task
os.environ["MODEL_ID"] = "XX"
with self.assertRaises(EnvironmentError):
get_pipeline()
| 2 |
0 | hf_public_repos/api-inference-community/docker_images/fairseq | hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_api_text_to_speech.py | import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"text-to-speech" not in ALLOWED_TASKS,
"text-to-speech not implemented",
)
class TextToSpeechTestCase(TestCase):
def setUp(self):
model_id = TESTABLE_MODELS["text-to-speech"]
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = model_id
os.environ["TASK"] = "text-to-speech"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def test_simple(self):
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": "This is some text"})
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "audio/flac")
audio = ffmpeg_read(response.content, 16000)
self.assertEqual(len(audio.shape), 1)
self.assertGreater(audio.shape[0], 1000)
def test_malformed_input(self):
with TestClient(self.app) as client:
response = client.post("/", data=b"\xc3\x28")
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(
response.content,
b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
)
| 3 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/setfit/requirements.txt | starlette==0.27.0
git+https://github.com/huggingface/api-inference-community.git@f06a71e72e92caeebabaeced979eacb3542bf2ca
huggingface_hub==0.20.2
setfit==1.0.3
| 4 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/setfit/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Tom Aarsen <[email protected]>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
| 5 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/setfit/prestart.sh | python app/main.py
| 6 |
0 | hf_public_repos/api-inference-community/docker_images/setfit | hf_public_repos/api-inference-community/docker_images/setfit/app/main.py | import functools
import logging
import os
import pathlib
from typing import Dict, Type
from api_inference_community import hub
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TextClassificationPipeline
from huggingface_hub import constants
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
def get_model_id():
m_id = os.getenv("MODEL_ID")
    # Workaround: once sentence_transformers handles this env variable properly,
    # this should not be needed anymore
if constants.HF_HUB_OFFLINE:
cache_dir = pathlib.Path(constants.HF_HUB_CACHE)
m_id = hub.cached_revision_path(
cache_dir=cache_dir, repo_id=m_id, revision=os.getenv("REVISION")
)
return m_id
MODEL_ID = get_model_id()
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"text-classification": TextClassificationPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
task = os.environ["TASK"]
model_id = MODEL_ID
if task not in ALLOWED_TASKS:
raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
        # Loading may fail here; swallow it so the exception can be shown later.
pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
        # Loading may fail here; swallow it so the exception can be shown later.
pass
| 7 |
0 | hf_public_repos/api-inference-community/docker_images/setfit/app | hf_public_repos/api-inference-community/docker_images/setfit/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
@abstractmethod
def __init__(self, model_id: str):
raise NotImplementedError("Pipelines should implement an __init__ method")
@abstractmethod
def __call__(self, inputs: Any) -> Any:
raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
pass
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/setfit/app | hf_public_repos/api-inference-community/docker_images/setfit/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException
from app.pipelines.text_classification import TextClassificationPipeline
| 9 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/unary.cu | #define _USE_MATH_DEFINES
#include<math.h>
#include<stdint.h>
#include "cuda_utils.cuh"
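// UNARY_OP(TYPENAME, FN_NAME, FUNC) expands to an elementwise kernel. `info`
// points to `num_dims` sizes followed by `num_dims` strides; when `info` is
// null or the layout is contiguous the kernel indexes directly, otherwise it
// goes through get_strided_index. A null `inp` applies the op in place on `out`.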
#define UNARY_OP(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
template<typename T>
__device__ __forceinline__ T gelu_erf_fwd(T x) {
return x * normcdfg(x);
}
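// Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
// with sqrt(2/pi) written as M_2_SQRTPI * M_SQRT1_2.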
template<typename T>
__device__ __forceinline__ T gelu_fwd(T x) {
T x_sq = x * x;
T x_cube = x_sq * x;
T alpha = x + static_cast<T>(0.044715) * x_cube;
return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + tanhg(static_cast<T>(M_2_SQRTPI * M_SQRT1_2) * alpha));
}
template<typename T>
__device__ __forceinline__ T elu_fwd(T x, T alpha) {
if (x > static_cast<T>(0)) {
return x;
}
return alpha * (expg(x) - static_cast<T>(1));
}
template<typename T>
__device__ __forceinline__ T relu_fwd(T x) {
T zero = 0.;
return maxg(x, zero);
}
template<typename T>
__device__ __forceinline__ T silu_fwd(T x) {
return x / (static_cast<T>(1) + expg(-x));
}
template<typename T>
__device__ __forceinline__ T sigmoid_fwd(T x) {
return recipg(static_cast<T>(1) + expg(-x));
}
#define UNARY_OP1(TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME param, \
const TYPENAME *inp, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = FUNC; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = FUNC; \
} \
} \
} \
template<typename T>
__device__ T sign_(T t) {
return static_cast<T>(t > static_cast<T>(0)) - static_cast<T>(t < static_cast<T>(0));
}
#if __CUDA_ARCH__ >= 800
UNARY_OP(__nv_bfloat16, ucopy_bf16, x)
UNARY_OP(__nv_bfloat16, uneg_bf16, -x)
UNARY_OP(__nv_bfloat16, urecip_bf16, recipg(x))
UNARY_OP(__nv_bfloat16, uexp_bf16, expg(x))
UNARY_OP(__nv_bfloat16, ulog_bf16, logg(x))
UNARY_OP(__nv_bfloat16, usin_bf16, sing(x))
UNARY_OP(__nv_bfloat16, ucos_bf16, cosg(x))
UNARY_OP(__nv_bfloat16, utanh_bf16, tanhg(x))
UNARY_OP(__nv_bfloat16, uerf_bf16, erfg(x))
UNARY_OP(__nv_bfloat16, uceil_bf16, ceilg(x))
UNARY_OP(__nv_bfloat16, ufloor_bf16, floorg(x))
UNARY_OP(__nv_bfloat16, uround_bf16, roundg(x))
UNARY_OP(__nv_bfloat16, unormcdf_bf16, normcdfg(x))
UNARY_OP(__nv_bfloat16, uabs_bf16, absg(x))
UNARY_OP(__nv_bfloat16, usqr_bf16, x*x)
UNARY_OP(__nv_bfloat16, usqrt_bf16, sqrtg(x))
UNARY_OP(__nv_bfloat16, ugelu_bf16, gelu_fwd(x))
UNARY_OP(__nv_bfloat16, ugelu_erf_bf16, gelu_erf_fwd(x))
UNARY_OP(__nv_bfloat16, urelu_bf16, relu_fwd(x))
UNARY_OP1(__nv_bfloat16, uelu_bf16, elu_fwd(x, param))
UNARY_OP(__nv_bfloat16, usilu_bf16, silu_fwd(x))
UNARY_OP1(__nv_bfloat16, upowf_bf16, powg(x, param))
UNARY_OP(__nv_bfloat16, usign_bf16, sign_(x))
UNARY_OP(__nv_bfloat16, usigmoid_bf16, sigmoid_fwd(x))
#endif
#if __CUDA_ARCH__ >= 530
UNARY_OP(__half, ucopy_f16, x)
UNARY_OP(__half, uneg_f16, -x)
UNARY_OP(__half, urecip_f16, recipg(x))
UNARY_OP(__half, uexp_f16, expg(x))
UNARY_OP(__half, ulog_f16, logg(x))
UNARY_OP(__half, usin_f16, sing(x))
UNARY_OP(__half, ucos_f16, cosg(x))
UNARY_OP(__half, utanh_f16, tanhg(x))
UNARY_OP(__half, uerf_f16, erfg(x))
UNARY_OP(__half, uceil_f16, ceilg(x))
UNARY_OP(__half, ufloor_f16, floorg(x))
UNARY_OP(__half, uround_f16, roundg(x))
UNARY_OP(__half, unormcdf_f16, normcdfg(x))
UNARY_OP(__half, uabs_f16, absg(x))
UNARY_OP(__half, usqr_f16, x*x)
UNARY_OP(__half, usqrt_f16, sqrtg(x))
UNARY_OP(__half, ugelu_f16, gelu_fwd(x))
UNARY_OP(__half, ugelu_erf_f16, gelu_erf_fwd(x))
UNARY_OP(__half, urelu_f16, relu_fwd(x))
UNARY_OP1(__half, uelu_f16, elu_fwd(x, param))
UNARY_OP(__half, usilu_f16, silu_fwd(x))
UNARY_OP1(__half, upowf_f16, powg(x, param))
UNARY_OP(__half, usign_f16, sign_(x))
UNARY_OP(__half, usigmoid_f16, sigmoid_fwd(x))
#endif
UNARY_OP(uint8_t, ucopy_u8, x)
UNARY_OP(uint32_t, ucopy_u32, x)
UNARY_OP(int64_t, ucopy_i64, x)
UNARY_OP(float, ucopy_f32, x)
UNARY_OP(double, ucopy_f64, x)
UNARY_OP(float, uneg_f32, -x)
UNARY_OP(double, uneg_f64, -x)
UNARY_OP(float, urecip_f32, recipg(x))
UNARY_OP(double, urecip_f64, recipg(x))
UNARY_OP(float, uexp_f32, expg(x))
UNARY_OP(double, uexp_f64, expg(x))
UNARY_OP(float, ulog_f32, logg(x))
UNARY_OP(double, ulog_f64, logg(x))
UNARY_OP(float, usin_f32, sing(x))
UNARY_OP(double, usin_f64, sing(x))
UNARY_OP(float, ucos_f32, cosg(x))
UNARY_OP(double, ucos_f64, cosg(x))
UNARY_OP(float, utanh_f32, tanhg(x))
UNARY_OP(double, utanh_f64, tanhg(x))
UNARY_OP(float, uerf_f32, erfg(x))
UNARY_OP(double, uerf_f64, erfg(x))
UNARY_OP(float, uceil_f32, ceilg(x))
UNARY_OP(double, uceil_f64, ceilg(x))
UNARY_OP(float, ufloor_f32, floorg(x))
UNARY_OP(double, ufloor_f64, floorg(x))
UNARY_OP(float, uround_f32, roundg(x))
UNARY_OP(double, uround_f64, roundg(x))
UNARY_OP(float, unormcdf_f32, normcdfg(x))
UNARY_OP(double, unormcdf_f64, normcdfg(x))
UNARY_OP(float, uabs_f32, absg(x))
UNARY_OP(double, uabs_f64, absg(x))
UNARY_OP(float, usqr_f32, x*x)
UNARY_OP(double, usqr_f64, x*x)
UNARY_OP(float, usqrt_f32, sqrtg(x))
UNARY_OP(double, usqrt_f64, sqrtg(x))
UNARY_OP(float, ugelu_f32, gelu_fwd(x))
UNARY_OP(double, ugelu_f64, gelu_fwd(x))
UNARY_OP(float, ugelu_erf_f32, gelu_erf_fwd(x))
UNARY_OP(double, ugelu_erf_f64, gelu_erf_fwd(x))
UNARY_OP(float, urelu_f32, relu_fwd(x))
UNARY_OP(double, urelu_f64, relu_fwd(x))
UNARY_OP1(float, uelu_f32, elu_fwd(x, param))
UNARY_OP1(double, uelu_f64, elu_fwd(x, param))
UNARY_OP(float, usilu_f32, silu_fwd(x))
UNARY_OP(double, usilu_f64, silu_fwd(x))
UNARY_OP1(float, upowf_f32, powg(x, param))
UNARY_OP1(double, upowf_f64, powg(x, param))
UNARY_OP(float, usign_f32, sign_(x))
UNARY_OP(double, usign_f64, sign_(x))
UNARY_OP(float, usigmoid_f32, sigmoid_fwd(x))
UNARY_OP(double, usigmoid_f64, sigmoid_fwd(x))
| 0 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/quantized.cu | // Kernels adapted from llama.cpp ggml-cuda.cu
// https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda.cu
#include "cuda_fp16.h"
#include "cuda_bf16.h"
#include<stdint.h>
#define GGML_UNUSED(x) (void)(x)
#define GGML_CUDA_ASSUME(x)
#ifdef GGML_QKK_64
#define QK_K 64
#define K_SCALE_SIZE 4
#else
#define QK_K 256
#define K_SCALE_SIZE 12
#endif
#undef GGML_CUDA_F16
#define GGML_CUDA_DMMV_X 32
#define CUDA_QUANTIZE_BLOCK_SIZE 256
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
#define K_QUANTS_PER_ITERATION 2
typedef uint16_t ggml_fp16_t;
typedef float dfloat; // dequantize float
typedef float2 dfloat2;
typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v);
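// Butterfly reduction over a 32-lane warp: after the xor-shuffles with masks
// 16, 8, 4, 2 and 1, every lane holds the sum (warp_reduce_sum) or the max
// (warp_reduce_max) of all 32 lanes.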
static __device__ __forceinline__ float warp_reduce_sum(float x) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
x += __shfl_xor_sync(0xffffffff, x, mask, 32);
}
return x;
}
static __device__ __forceinline__ float warp_reduce_max(float x) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
}
return x;
}
static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) {
const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment
int x32 = 0;
x32 |= x16[0] << 0;
x32 |= x16[1] << 16;
return x32;
}
static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) {
const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment
int x32 = 0;
x32 |= x16[0] << 0;
x32 |= x16[1] << 16;
return x32;
}
static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) {
return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) {
return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
#define WARP_SIZE 32
#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
#define CC_PASCAL 600
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
#define CC_VOLTA 700
#define CC_OFFSET_AMD 1000000
#define CC_RDNA1 (CC_OFFSET_AMD + 1010)
#define CC_RDNA2 (CC_OFFSET_AMD + 1030)
#define CC_RDNA3 (CC_OFFSET_AMD + 1100)
static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, int c) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A
return __dp4a(a, b, c);
#else // __CUDA_ARCH__ >= MIN_CC_DP4A
const int8_t * a8 = (const int8_t *) &a;
const int8_t * b8 = (const int8_t *) &b;
return c + a8[0]*b8[0] + a8[1]*b8[1] + a8[2]*b8[2] + a8[3]*b8[3];
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
#define MMQ_X_Q4_0_RDNA2 64
#define MMQ_Y_Q4_0_RDNA2 128
#define NWARPS_Q4_0_RDNA2 8
#define MMQ_X_Q4_0_RDNA1 64
#define MMQ_Y_Q4_0_RDNA1 64
#define NWARPS_Q4_0_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q4_0_AMPERE 4
#define MMQ_Y_Q4_0_AMPERE 32
#define NWARPS_Q4_0_AMPERE 4
#else
#define MMQ_X_Q4_0_AMPERE 64
#define MMQ_Y_Q4_0_AMPERE 128
#define NWARPS_Q4_0_AMPERE 4
#endif
#define MMQ_X_Q4_0_PASCAL 64
#define MMQ_Y_Q4_0_PASCAL 64
#define NWARPS_Q4_0_PASCAL 8
#define MMQ_X_Q4_1_RDNA2 64
#define MMQ_Y_Q4_1_RDNA2 128
#define NWARPS_Q4_1_RDNA2 8
#define MMQ_X_Q4_1_RDNA1 64
#define MMQ_Y_Q4_1_RDNA1 64
#define NWARPS_Q4_1_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q4_1_AMPERE 4
#define MMQ_Y_Q4_1_AMPERE 32
#define NWARPS_Q4_1_AMPERE 4
#else
#define MMQ_X_Q4_1_AMPERE 64
#define MMQ_Y_Q4_1_AMPERE 128
#define NWARPS_Q4_1_AMPERE 4
#endif
#define MMQ_X_Q4_1_PASCAL 64
#define MMQ_Y_Q4_1_PASCAL 64
#define NWARPS_Q4_1_PASCAL 8
#define MMQ_X_Q5_0_RDNA2 64
#define MMQ_Y_Q5_0_RDNA2 128
#define NWARPS_Q5_0_RDNA2 8
#define MMQ_X_Q5_0_RDNA1 64
#define MMQ_Y_Q5_0_RDNA1 64
#define NWARPS_Q5_0_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q5_0_AMPERE 4
#define MMQ_Y_Q5_0_AMPERE 32
#define NWARPS_Q5_0_AMPERE 4
#else
#define MMQ_X_Q5_0_AMPERE 128
#define MMQ_Y_Q5_0_AMPERE 64
#define NWARPS_Q5_0_AMPERE 4
#endif
#define MMQ_X_Q5_0_PASCAL 64
#define MMQ_Y_Q5_0_PASCAL 64
#define NWARPS_Q5_0_PASCAL 8
#define MMQ_X_Q5_1_RDNA2 64
#define MMQ_Y_Q5_1_RDNA2 128
#define NWARPS_Q5_1_RDNA2 8
#define MMQ_X_Q5_1_RDNA1 64
#define MMQ_Y_Q5_1_RDNA1 64
#define NWARPS_Q5_1_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q5_1_AMPERE 4
#define MMQ_Y_Q5_1_AMPERE 32
#define NWARPS_Q5_1_AMPERE 4
#else
#define MMQ_X_Q5_1_AMPERE 128
#define MMQ_Y_Q5_1_AMPERE 64
#define NWARPS_Q5_1_AMPERE 4
#endif
#define MMQ_X_Q5_1_PASCAL 64
#define MMQ_Y_Q5_1_PASCAL 64
#define NWARPS_Q5_1_PASCAL 8
#define MMQ_X_Q8_0_RDNA2 64
#define MMQ_Y_Q8_0_RDNA2 128
#define NWARPS_Q8_0_RDNA2 8
#define MMQ_X_Q8_0_RDNA1 64
#define MMQ_Y_Q8_0_RDNA1 64
#define NWARPS_Q8_0_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q8_0_AMPERE 4
#define MMQ_Y_Q8_0_AMPERE 32
#define NWARPS_Q8_0_AMPERE 4
#else
#define MMQ_X_Q8_0_AMPERE 128
#define MMQ_Y_Q8_0_AMPERE 64
#define NWARPS_Q8_0_AMPERE 4
#endif
#define MMQ_X_Q8_0_PASCAL 64
#define MMQ_Y_Q8_0_PASCAL 64
#define NWARPS_Q8_0_PASCAL 8
#define MMQ_X_Q2_K_RDNA2 64
#define MMQ_Y_Q2_K_RDNA2 128
#define NWARPS_Q2_K_RDNA2 8
#define MMQ_X_Q2_K_RDNA1 128
#define MMQ_Y_Q2_K_RDNA1 32
#define NWARPS_Q2_K_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q2_K_AMPERE 4
#define MMQ_Y_Q2_K_AMPERE 32
#define NWARPS_Q2_K_AMPERE 4
#else
#define MMQ_X_Q2_K_AMPERE 64
#define MMQ_Y_Q2_K_AMPERE 128
#define NWARPS_Q2_K_AMPERE 4
#endif
#define MMQ_X_Q2_K_PASCAL 64
#define MMQ_Y_Q2_K_PASCAL 64
#define NWARPS_Q2_K_PASCAL 8
#define MMQ_X_Q3_K_RDNA2 128
#define MMQ_Y_Q3_K_RDNA2 64
#define NWARPS_Q3_K_RDNA2 8
#define MMQ_X_Q3_K_RDNA1 32
#define MMQ_Y_Q3_K_RDNA1 128
#define NWARPS_Q3_K_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q3_K_AMPERE 4
#define MMQ_Y_Q3_K_AMPERE 32
#define NWARPS_Q3_K_AMPERE 4
#else
#define MMQ_X_Q3_K_AMPERE 128
#define MMQ_Y_Q3_K_AMPERE 128
#define NWARPS_Q3_K_AMPERE 4
#endif
#define MMQ_X_Q3_K_PASCAL 64
#define MMQ_Y_Q3_K_PASCAL 64
#define NWARPS_Q3_K_PASCAL 8
#define MMQ_X_Q4_K_RDNA2 64
#define MMQ_Y_Q4_K_RDNA2 128
#define NWARPS_Q4_K_RDNA2 8
#define MMQ_X_Q4_K_RDNA1 32
#define MMQ_Y_Q4_K_RDNA1 64
#define NWARPS_Q4_K_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q4_K_AMPERE 4
#define MMQ_Y_Q4_K_AMPERE 32
#define NWARPS_Q4_K_AMPERE 4
#else
#define MMQ_X_Q4_K_AMPERE 64
#define MMQ_Y_Q4_K_AMPERE 128
#define NWARPS_Q4_K_AMPERE 4
#endif
#define MMQ_X_Q4_K_PASCAL 64
#define MMQ_Y_Q4_K_PASCAL 64
#define NWARPS_Q4_K_PASCAL 8
#define MMQ_X_Q5_K_RDNA2 64
#define MMQ_Y_Q5_K_RDNA2 128
#define NWARPS_Q5_K_RDNA2 8
#define MMQ_X_Q5_K_RDNA1 32
#define MMQ_Y_Q5_K_RDNA1 64
#define NWARPS_Q5_K_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q5_K_AMPERE 4
#define MMQ_Y_Q5_K_AMPERE 32
#define NWARPS_Q5_K_AMPERE 4
#else
#define MMQ_X_Q5_K_AMPERE 64
#define MMQ_Y_Q5_K_AMPERE 128
#define NWARPS_Q5_K_AMPERE 4
#endif
#define MMQ_X_Q5_K_PASCAL 64
#define MMQ_Y_Q5_K_PASCAL 64
#define NWARPS_Q5_K_PASCAL 8
#define MMQ_X_Q6_K_RDNA2 64
#define MMQ_Y_Q6_K_RDNA2 128
#define NWARPS_Q6_K_RDNA2 8
#define MMQ_X_Q6_K_RDNA1 32
#define MMQ_Y_Q6_K_RDNA1 64
#define NWARPS_Q6_K_RDNA1 8
#if defined(CUDA_USE_TENSOR_CORES)
#define MMQ_X_Q6_K_AMPERE 4
#define MMQ_Y_Q6_K_AMPERE 32
#define NWARPS_Q6_K_AMPERE 4
#else
#define MMQ_X_Q6_K_AMPERE 64
#define MMQ_Y_Q6_K_AMPERE 64
#define NWARPS_Q6_K_AMPERE 4
#endif
#define MMQ_X_Q6_K_PASCAL 64
#define MMQ_Y_Q6_K_PASCAL 64
#define NWARPS_Q6_K_PASCAL 8
// QK = number of values after dequantization
// QR = QK / number of values before dequantization
// QI = number of 32 bit integers before dequantization
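// Worked example for Q4_0 below: QK4_0 = 32 dequantized floats per block,
// stored as 16 bytes of 4-bit nibbles (QR4_0 = 2 quants per byte), so
// QI4_0 = QK4_0 / (4 * QR4_0) = 32 / 8 = 4 32-bit words of quantized data per block.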
#define QK4_0 32
#define QR4_0 2
#define QI4_0 (QK4_0 / (4 * QR4_0))
typedef struct {
half d; // delta
uint8_t qs[QK4_0 / 2]; // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
#define QK4_1 32
#define QR4_1 2
#define QI4_1 (QK4_1 / (4 * QR4_1))
typedef struct {
half2 dm; // dm.x = delta, dm.y = min
uint8_t qs[QK4_1 / 2]; // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
#define QK5_0 32
#define QR5_0 2
#define QI5_0 (QK5_0 / (4 * QR5_0))
typedef struct {
half d; // delta
uint8_t qh[4]; // 5-th bit of quants
uint8_t qs[QK5_0 / 2]; // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
#define QK5_1 32
#define QR5_1 2
#define QI5_1 (QK5_1 / (4 * QR5_1))
typedef struct {
half2 dm; // dm.x = delta, dm.y = min
uint8_t qh[4]; // 5-th bit of quants
uint8_t qs[QK5_1 / 2]; // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
#define QK8_0 32
#define QR8_0 1
#define QI8_0 (QK8_0 / (4 * QR8_0))
typedef struct {
half d; // delta
int8_t qs[QK8_0]; // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
#define QK8_1 32
#define QR8_1 1
#define QI8_1 (QK8_1 / (4 * QR8_1))
typedef struct {
half2 ds; // ds.x = delta, ds.y = sum
int8_t qs[QK8_0]; // quants
} block_q8_1;
static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding");
typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs);
typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc);
typedef void (*load_tiles_cuda_t)(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row);
typedef float (*vec_dot_q_mul_mat_cuda_t)(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k);
#define QR2_K 4
#define QI2_K (QK_K / (4*QR2_K))
typedef struct {
uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
uint8_t qs[QK_K/4]; // quants
half2 dm; // super-block scale for quantized scales/mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
#define QR3_K 4
#define QI3_K (QK_K / (4*QR3_K))
typedef struct {
uint8_t hmask[QK_K/8]; // quants - high bit
uint8_t qs[QK_K/4]; // quants - low 2 bits
#ifdef GGML_QKK_64
uint8_t scales[2]; // scales, quantized with 8 bits
#else
uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
half d; // super-block scale
} block_q3_K;
//static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + K_SCALE_SIZE, "wrong q3_K block size/padding");
#define QR4_K 2
#define QI4_K (QK_K / (4*QR4_K))
#ifdef GGML_QKK_64
typedef struct {
half dm[2]; // super-block scales/mins
uint8_t scales[2]; // 4-bit block scales/mins
    uint8_t qs[QK_K/2];        // 4-bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == sizeof(half2) + QK_K/2 + 2, "wrong q4_K block size/padding");
#else
typedef struct {
half2 dm; // super-block scale for quantized scales/mins
uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
    uint8_t qs[QK_K/2];        // 4-bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding");
#endif
#define QR5_K 2
#define QI5_K (QK_K / (4*QR5_K))
#ifdef GGML_QKK_64
typedef struct {
half d; // super-block scale
int8_t scales[QK_K/16]; // block scales
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
#else
typedef struct {
half2 dm; // super-block scale for quantized scales/mins
uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif
#define QR6_K 2
#define QI6_K (QK_K / (4*QR6_K))
typedef struct {
uint8_t ql[QK_K/2]; // quants, lower 4 bits
uint8_t qh[QK_K/4]; // quants, upper 2 bits
int8_t scales[QK_K/16]; // scales
half d; // delta
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block size/padding");
// In llama.cpp this is only used for intermediate quantization and dot products
typedef struct {
float d; // delta
int8_t qs[QK_K]; // quants
int16_t bsums[QK_K/16]; // sum of quants in groups of 16
} block_q8_K;
static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
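// mul_mat_q, in outline: each thread block computes an mmq_y x mmq_x tile of dst.
// Tiles of the quantized x matrix and of the q8_1-quantized y matrix are staged in
// shared memory, and the type-specific vec_dot routine accumulates the partial
// dot products before the results are written back to global memory.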
template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps,
allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot>
static __device__ __forceinline__ void mul_mat_q(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const block_q_t * x = (const block_q_t *) vx;
const block_q8_1 * y = (const block_q8_1 *) vy;
const int blocks_per_row_x = ncols_x / qk;
const int blocks_per_col_y = nrows_y / QK8_1;
const int blocks_per_warp = WARP_SIZE / qi;
const int & ncols_dst = ncols_y;
const int row_dst_0 = blockIdx.x*mmq_y;
const int & row_x_0 = row_dst_0;
const int col_dst_0 = blockIdx.y*mmq_x;
const int & col_y_0 = col_dst_0;
int * tile_x_ql = nullptr;
half2 * tile_x_dm = nullptr;
int * tile_x_qh = nullptr;
int * tile_x_sc = nullptr;
allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc);
__shared__ int tile_y_qs[mmq_x * WARP_SIZE];
__shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1];
float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}};
for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {
load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc,
threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x);
#pragma unroll
for (int ir = 0; ir < qr; ++ir) {
const int kqs = ir*WARP_SIZE + threadIdx.x;
const int kbxd = kqs / QI8_1;
#pragma unroll
for (int i = 0; i < mmq_x; i += nwarps) {
const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses
const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd];
const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE;
tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1);
}
#pragma unroll
for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x;
const int kby = threadIdx.x % (WARP_SIZE/QI8_1);
const int col_y_eff = min(col_y_0 + ids, ncols_y-1);
// if the sum is not needed it's faster to transform the scale to f32 ahead of time
const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds;
half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby];
if (need_sum) {
*dsi_dst = *dsi_src;
} else {
float * dfi_dst = (float *) dsi_dst;
*dfi_dst = __low2half(*dsi_src);
}
}
__syncthreads();
// #pragma unroll // unrolling this loop causes too much register pressure
for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) {
#pragma unroll
for (int j = 0; j < mmq_x; j += nwarps) {
#pragma unroll
for (int i = 0; i < mmq_y; i += WARP_SIZE) {
sum[i/WARP_SIZE][j/nwarps] += vec_dot(
tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds,
threadIdx.x + i, threadIdx.y + j, k);
}
}
}
__syncthreads();
}
}
#pragma unroll
for (int j = 0; j < mmq_x; j += nwarps) {
const int col_dst = col_dst_0 + j + threadIdx.y;
if (col_dst >= ncols_dst) {
return;
}
#pragma unroll
for (int i = 0; i < mmq_y; i += WARP_SIZE) {
const int row_dst = row_dst_0 + threadIdx.x + i;
if (row_dst >= nrows_dst) {
continue;
}
dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps];
}
}
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
(void)x_qh; (void)x_sc;
const int kbx = k / QI4_0;
const int kqsx = k % QI4_0;
const block_q4_0 * bx0 = (const block_q4_0 *) vx;
float * x_dmf = (float *) x_dm;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
// x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
const int kbxd = k % blocks_per_tile_x_row;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) {
int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d;
}
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI4_1;
const int kqsx = k % QI4_1;
const block_q4_1 * bx0 = (const block_q4_1 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
const int kbxd = k % blocks_per_tile_x_row;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) {
int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm;
}
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
(void)x_qh; (void)x_sc;
__shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0];
*x_ql = tile_x_qs;
*x_dm = (half2 *) tile_x_d;
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
    __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1];
*x_ql = tile_x_qs;
*x_dm = tile_x_dm;
}
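// q4_0 dequantization: each byte of qs packs two 4-bit quants (low and high
// nibble); the dequantized value is d * (q - 8).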
static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q4_0 * x = (const block_q4_0 *) vx;
const dfloat d = x[ib].d;
const int vui = x[ib].qs[iqs];
v.x = vui & 0xF;
v.y = vui >> 4;
#ifdef GGML_CUDA_F16
v = __hsub2(v, {8.0f, 8.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 8.0f) * d;
v.y = (v.y - 8.0f) * d;
#endif // GGML_CUDA_F16
}
static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q4_1 * x = (const block_q4_1 *) vx;
const dfloat d = __low2half(x[ib].dm);
const dfloat m = __high2half(x[ib].dm);
const int vui = x[ib].qs[iqs];
v.x = vui & 0xF;
v.y = vui >> 4;
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q5_0 * x = (const block_q5_0 *) vx;
const dfloat d = x[ib].d;
uint32_t qh;
memcpy(&qh, x[ib].qh, sizeof(qh));
const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
v.y = ((x[ib].qs[iqs] >> 4) | xh_1);
#ifdef GGML_CUDA_F16
v = __hsub2(v, {16.0f, 16.0f});
v = __hmul2(v, {d, d});
#else
v.x = (v.x - 16.0f) * d;
v.y = (v.y - 16.0f) * d;
#endif // GGML_CUDA_F16
}
static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q5_1 * x = (const block_q5_1 *) vx;
const dfloat d = __low2half(x[ib].dm);
const dfloat m = __high2half(x[ib].dm);
uint32_t qh;
memcpy(&qh, x[ib].qh, sizeof(qh));
const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
v.y = ((x[ib].qs[iqs] >> 4) | xh_1);
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
v = __hadd2(v, {m, m});
#else
v.x = (v.x * d) + m;
v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
const block_q8_0 * x = (const block_q8_0 *) vx;
const dfloat d = x[ib].d;
v.x = x[ib].qs[iqs + 0];
v.y = x[ib].qs[iqs + 1];
#ifdef GGML_CUDA_F16
v = __hmul2(v, {d, d});
#else
v.x *= d;
v.y *= d;
#endif // GGML_CUDA_F16
}
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static __device__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) {
const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x);
if (i >= k) {
return;
}
const int ib = i/qk; // block index
const int iqs = (i%qk)/qr; // quant index
const int iybs = i - i%qk; // y block start index
const int y_offset = qr == 1 ? 1 : qk/2;
// dequantize
dfloat2 v;
dequantize_kernel(vx, ib, iqs, v);
y[iybs + iqs + 0] = v.x;
y[iybs + iqs + y_offset] = v.y;
}
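// Worked example of the index math above (added for clarity; numbers assume the
// q4_0 instantiation, i.e. qk = QK4_0 = 32 and qr = QR4_0 = 2): the thread with
// global element index i = 40 gets ib = 40/32 = 1, iqs = (40%32)/2 = 4,
// iybs = 40 - 40%32 = 32 and y_offset = qk/2 = 16, so it reads x[1].qs[4] and
// writes the low nibble to y[36] and the high nibble to y[52].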
template<typename dst_t>
static __device__ void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {
const int64_t i = blockIdx.x;
// assume 32 threads
const int tid = threadIdx.x;
const int il = tid/8;
const int ir = tid%8;
const int64_t ib = 8*i + ir;
if (ib >= nb32) {
return;
}
dst_t * y = yy + 256*i + 32*ir + 4*il;
const block_q4_0 * x = (const block_q4_0 *)vx + ib;
const float d = __half2float(x->d);
const float dm = -8*d;
const uint8_t * q = x->qs + 4*il;
for (int l = 0; l < 4; ++l) {
y[l+ 0] = d * (q[l] & 0xF) + dm;
y[l+16] = d * (q[l] >> 4) + dm;
}
}
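// Added note on the thread mapping above: each CUDA block covers 8 q4_0
// sub-blocks (256 output values). With the assumed 32 threads, ir = tid%8 picks
// the sub-block and il = tid/8 picks a 4-byte slice of its qs[], so every thread
// emits 8 values: 4 from low nibbles at y[0..3] and 4 from high nibbles at
// y[16..19], relative to its own base pointer.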
template<typename dst_t>
static __device__ void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {
const int64_t i = blockIdx.x;
// assume 32 threads
const int tid = threadIdx.x;
const int il = tid/8;
const int ir = tid%8;
const int64_t ib = 8*i + ir;
if (ib >= nb32) {
return;
}
dst_t * y = yy + 256*i + 32*ir + 4*il;
const block_q4_1 * x = (const block_q4_1 *)vx + ib;
const float2 d = __half22float2(x->dm);
const uint8_t * q = x->qs + 4*il;
for (int l = 0; l < 4; ++l) {
y[l+ 0] = d.x * (q[l] & 0xF) + d.y;
y[l+16] = d.x * (q[l] >> 4) + d.y;
}
}
//================================== k-quants
template<typename dst_t>
static __device__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
const block_q2_K * x = (const block_q2_K *) vx;
const int tid = threadIdx.x;
#if QK_K == 256
const int n = tid/32;
const int l = tid - 32*n;
const int is = 8*n + l/16;
const uint8_t q = x[i].qs[32*n + l];
dst_t * y = yy + i*QK_K + 128*n;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
#else
const int is = tid/16; // 0 or 1
const int il = tid%16; // 0...15
const uint8_t q = x[i].qs[il] >> (2*is);
dst_t * y = yy + i*QK_K + 16*is + il;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4);
#endif
}
template<typename dst_t>
static __device__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
const block_q3_K * x = (const block_q3_K *) vx;
#if QK_K == 256
const int r = threadIdx.x/4;
const int tid = r/2;
const int is0 = r%2;
const int l0 = 16*is0 + 4*(threadIdx.x%4);
const int n = tid / 4;
const int j = tid - 4*n;
uint8_t m = 1 << (4*n + j);
int is = 8*n + 2*j + is0;
int shift = 2*j;
int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
(x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
float d_all = x[i].d;
float dl = d_all * (us - 32);
dst_t * y = yy + i*QK_K + 128*n + 32*j;
const uint8_t * q = x[i].qs + 32*n;
const uint8_t * hm = x[i].hmask;
for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
#else
const int tid = threadIdx.x;
const int is = tid/16; // 0 or 1
const int il = tid%16; // 0...15
const int im = il/8; // 0...1
const int in = il%8; // 0...7
dst_t * y = yy + i*QK_K + 16*is + il;
const uint8_t q = x[i].qs[il] >> (2*is);
const uint8_t h = x[i].hmask[in] >> (2*is + im);
const float d = (float)x[i].d;
if (is == 0) {
y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
} else {
y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
}
#endif
}
#if QK_K == 256
static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
if (j < 4) {
d = q[j] & 63; m = q[j + 4] & 63;
} else {
d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
}
}
#endif
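// Added note on the packing decoded by get_scale_min_k4 (QK_K == 256): the
// 12-byte scales[] field stores eight 6-bit scales and eight 6-bit mins.
// Bytes 0..3 hold scales 0..3 in their low 6 bits, bytes 4..7 hold mins 0..3.
// For j = 4..7 the low 4 bits of scale j sit in the low nibble of byte j+4 and
// the low 4 bits of min j in its high nibble, while the missing top 2 bits come
// from the top 2 bits of byte j-4 (scale) and byte j (min).
// Example: scale 5 = (q[9] & 0xF) | ((q[1] >> 6) << 4).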
template<typename dst_t>
static __device__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const block_q4_K * x = (const block_q4_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 32 threads
const int tid = threadIdx.x;
const int il = tid/8;
const int ir = tid%8;
const int is = 2*il;
const int n = 4;
dst_t * y = yy + i*QK_K + 64*il + n*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint8_t * q = x[i].qs + 32*il + n*ir;
uint8_t sc, m;
get_scale_min_k4(is + 0, x[i].scales, sc, m);
const float d1 = dall * sc; const float m1 = dmin * m;
get_scale_min_k4(is + 1, x[i].scales, sc, m);
const float d2 = dall * sc; const float m2 = dmin * m;
for (int l = 0; l < n; ++l) {
y[l + 0] = d1 * (q[l] & 0xF) - m1;
y[l +32] = d2 * (q[l] >> 4) - m2;
}
#else
const int tid = threadIdx.x;
const uint8_t * q = x[i].qs;
dst_t * y = yy + i*QK_K;
const float d = (float)x[i].dm[0];
const float m = (float)x[i].dm[1];
y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4);
y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4);
#endif
}
template<typename dst_t>
static __device__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const block_q5_K * x = (const block_q5_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
const int tid = threadIdx.x;
const int il = tid/16; // il is in 0...3
const int ir = tid%16; // ir is in 0...15
const int is = 2*il; // is is in 0...6
dst_t * y = yy + i*QK_K + 64*il + 2*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint8_t * ql = x[i].qs + 32*il + 2*ir;
const uint8_t * qh = x[i].qh + 2*ir;
uint8_t sc, m;
get_scale_min_k4(is + 0, x[i].scales, sc, m);
const float d1 = dall * sc; const float m1 = dmin * m;
get_scale_min_k4(is + 1, x[i].scales, sc, m);
const float d2 = dall * sc; const float m2 = dmin * m;
uint8_t hm = 1 << (2*il);
y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
hm <<= 1;
y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2;
y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2;
#else
const int tid = threadIdx.x;
const uint8_t q = x[i].qs[tid];
const int im = tid/8; // 0...3
const int in = tid%8; // 0...7
const int is = tid/16; // 0 or 1
const uint8_t h = x[i].qh[in] >> im;
const float d = x[i].d;
dst_t * y = yy + i*QK_K + tid;
y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16));
y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16));
#endif
}
template<typename dst_t>
static __device__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const block_q6_K * x = (const block_q6_K *) vx;
const int64_t i = blockIdx.x;
#if QK_K == 256
// assume 64 threads - this is very slightly better than the one below
const int64_t tid = threadIdx.x;
const int64_t ip = tid/32; // ip is 0 or 1
const int64_t il = tid - 32*ip; // 0...31
const int64_t is = 8*ip + il/16;
dst_t * y = yy + i*QK_K + 128*ip + il;
const float d = x[i].d;
const uint8_t * ql = x[i].ql + 64*ip + il;
const uint8_t qh = x[i].qh[32*ip + il];
const int8_t * sc = x[i].scales + is;
y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32);
y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32);
#else
// assume 32 threads
const int64_t tid = threadIdx.x;
const int64_t ip = tid/16; // 0 or 1
const int64_t il = tid - 16*ip; // 0...15
dst_t * y = yy + i*QK_K + 16*ip + il;
const float d = x[i].d;
const uint8_t ql = x[i].ql[16*ip + il];
const uint8_t qh = x[i].qh[il] >> (2*ip);
const int8_t * sc = x[i].scales;
y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32);
#endif
}
template<typename dst_t>
static __device__ void dequantize_block_q8_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {
const int i = blockIdx.x;
// assume 32 threads
const int tid = threadIdx.x;
const int il = tid/8;
const int ir = tid%8;
const int ib = 8*i + ir;
if (ib >= nb32) {
return;
}
dst_t * y = yy + 256*i + 32*ir + 8*il;
const block_q8_0 * x = (const block_q8_0 *)vx + ib;
const float d = __half2float(x->d);
const int8_t * q = x->qs + 8*il;
for (int l = 0; l < 8; ++l) {
y[l] = d * q[l];
}
}
template<typename dst_t>
static __device__ void dequantize_block_q8_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const block_q8_K * x = (const block_q8_K *) vx;
const int i = blockIdx.x;
#if QK_K == 256
// assume 32 threads
const int tid = threadIdx.x;
const int il = tid/8;
const int ir = tid%8;
const int n = 8;
dst_t * y = yy + i*QK_K + 64*il + n*ir;
const int8_t * q = x[i].qs + 64*il + n*ir;
for (int l = 0; l < n; ++l) {
y[l] = q[l] * x[i].d;
}
#else
const int tid = threadIdx.x;
const uint8_t * q = x[i].qs;
dst_t * y = yy + i*QK_K;
y[tid] = x[i].d * x[i].scales[0];
#endif
}
template<typename dst_t>
static __device__ void dequantize_block_q5_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {
return dequantize_block<QK5_0, QR5_0, dequantize_q5_0>(vx, yy, nb32);
}
template<typename dst_t>
static __device__ void dequantize_block_q5_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {
return dequantize_block<QK5_1, QR5_1, dequantize_q5_1>(vx, yy, nb32);
}
#define DEQUANTIZE_K(QNAME) \
extern "C" __global__ void dequantize_block_##QNAME##_f32(const void * __restrict__ vx, float * __restrict__ y) { \
dequantize_block_##QNAME(vx, y); \
} \
extern "C" __global__ void dequantize_block_##QNAME##_f16(const void * __restrict__ vx, half * __restrict__ y) { \
dequantize_block_##QNAME(vx, y); \
}
#define DEQUANTIZE(QNAME) \
extern "C" __global__ void dequantize_block_##QNAME##_f32(const void * __restrict__ vx, float * __restrict__ y, const int k) { \
dequantize_block_##QNAME(vx, y, k); \
} \
extern "C" __global__ void dequantize_block_##QNAME##_f16(const void * __restrict__ vx, half * __restrict__ y, const int k) { \
dequantize_block_##QNAME(vx, y, k); \
}
DEQUANTIZE_K(q2_K)
DEQUANTIZE_K(q3_K)
DEQUANTIZE_K(q4_K)
DEQUANTIZE_K(q5_K)
DEQUANTIZE_K(q6_K)
DEQUANTIZE_K(q8_K)
DEQUANTIZE(q4_0)
DEQUANTIZE(q4_1)
DEQUANTIZE(q5_0)
DEQUANTIZE(q5_1)
DEQUANTIZE(q8_0)
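// Added note: each DEQUANTIZE_K(...) line above stamps out two extern "C" entry
// points, e.g. DEQUANTIZE_K(q2_K) yields dequantize_block_q2_K_f32(vx, y) and
// dequantize_block_q2_K_f16(vx, y), both forwarding to the templated
// dequantize_block_q2_K<dst_t>. Those kernels index super-blocks by blockIdx.x,
// so a launch is expected to use one CUDA block per QK_K-sized super-block with
// the per-format thread count noted in each kernel ("assume 32/64 threads").
// The DEQUANTIZE(...) variants additionally take the trailing int argument that
// is forwarded unchanged to the matching dequantize_block_* helper above.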
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __device__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) {
// qk = quantized weights per x block
// qr = number of quantized weights per data value in x block
const int row = blockIdx.x*blockDim.y + threadIdx.y;
if (row >= nrows) {
return;
}
const int tid = threadIdx.x;
const int iter_stride = 2*GGML_CUDA_DMMV_X;
const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
const int y_offset = qr == 1 ? 1 : qk/2;
// partial sum for each thread
#ifdef GGML_CUDA_F16
half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics
#else
float tmp = 0.0f;
#endif // GGML_CUDA_F16
for (int i = 0; i < ncols; i += iter_stride) {
const int col = i + vals_per_iter*tid;
const int ib = (row*ncols + col)/qk; // x block index
const int iqs = (col%qk)/qr; // x quant index
const int iybs = col - col%qk; // y block start index
// processing >2 values per i iter is faster for fast GPUs
#pragma unroll
for (int j = 0; j < vals_per_iter; j += 2) {
// process 2 vals per j iter
// dequantize
// for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
dfloat2 v;
dequantize_kernel(vx, ib, iqs + j/qr, v);
// matrix multiplication
// for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
#ifdef GGML_CUDA_F16
tmp += __hmul2(v, {
y[iybs + iqs + j/qr + 0],
y[iybs + iqs + j/qr + y_offset]
});
#else
tmp += v.x * y[iybs + iqs + j/qr + 0];
tmp += v.y * y[iybs + iqs + j/qr + y_offset];
#endif // GGML_CUDA_F16
}
}
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (tid == 0) {
#ifdef GGML_CUDA_F16
dst[row] = tmp.x + tmp.y;
#else
dst[row] = tmp;
#endif // GGML_CUDA_F16
}
}
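// Added worked example for the loop bounds above, assuming the usual WARP_SIZE = 32
// and GGML_CUDA_DMMV_X = 32 (the latter value is an assumption; both constants are
// defined elsewhere): iter_stride = 64 and vals_per_iter = 2, so each thread
// dequantizes one (v.x, v.y) pair per outer iteration and a warp collectively
// covers the 64 consecutive columns [i, i+63] before moving on; the
// __shfl_xor_sync loop at the end then reduces the 32 per-lane partial sums.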
extern "C" __global__ void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(vx, y, dst, ncols, nrows);
}
extern "C" __global__ void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(vx, y, dst, ncols, nrows);
}
extern "C" __global__ void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(vx, y, dst, ncols, nrows);
}
extern "C" __global__ void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(vx, y, dst, ncols, nrows);
}
extern "C" __global__ void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(vx, y, dst, ncols, nrows);
}
extern "C" __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {
static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");
const int row = blockIdx.x*blockDim.y + threadIdx.y;
if (row >= nrows) return;
const int num_blocks_per_row = ncols / QK_K;
const int ib0 = row*num_blocks_per_row;
const block_q2_K * x = (const block_q2_K *)vx + ib0;
float tmp = 0; // partial sum for thread in warp
#if QK_K == 256
const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1
const int step = 16/K_QUANTS_PER_ITERATION;
const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
const int in = tid - step*im; // 0...15 or 0...7
const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
const int q_offset = 32*im + l0;
const int s_offset = 8*im;
const int y_offset = 128*im + l0;
uint32_t aux[4];
const uint8_t * d = (const uint8_t *)aux;
const uint8_t * m = (const uint8_t *)(aux + 2);
for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
const float * y = yy + i * QK_K + y_offset;
const uint8_t * q = x[i].qs + q_offset;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset);
aux[0] = a[0] & 0x0f0f0f0f;
aux[1] = a[1] & 0x0f0f0f0f;
aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
aux[3] = (a[1] >> 4) & 0x0f0f0f0f;
float sum1 = 0, sum2 = 0;
for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3)
+ y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3)
+ y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3)
+ y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3)
+ y[l+16] * d[1] * ((q[l+16] >> 0) & 3)
+ y[l+48] * d[3] * ((q[l+16] >> 2) & 3)
+ y[l+80] * d[5] * ((q[l+16] >> 4) & 3)
+y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6]
+ y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];
}
tmp += dall * sum1 - dmin * sum2;
}
#else
const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7
const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3
const int offset = tid * K_QUANTS_PER_ITERATION;
uint32_t uaux[2];
const uint8_t * d = (const uint8_t *)uaux;
for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
const float * y = yy + i * QK_K + offset;
const uint8_t * q = x[i].qs + offset;
const uint32_t * s = (const uint32_t *)x[i].scales;
uaux[0] = s[0] & 0x0f0f0f0f;
uaux[1] = (s[0] >> 4) & 0x0f0f0f0f;
const float2 dall = __half22float2(x[i].dm);
float sum1 = 0, sum2 = 0;
for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
const uint8_t ql = q[l];
sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3)
+ y[l+16] * d[1] * ((ql >> 2) & 3)
+ y[l+32] * d[2] * ((ql >> 4) & 3)
+ y[l+48] * d[3] * ((ql >> 6) & 3);
sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7];
}
tmp += dall.x * sum1 - dall.y * sum2;
}
#endif
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[row] = tmp;
}
}
extern "C" __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {
const int row = blockIdx.x*blockDim.y + threadIdx.y;
if (row >= nrows) return;
const int num_blocks_per_row = ncols / QK_K;
const int ib0 = row*num_blocks_per_row;
const block_q3_K * x = (const block_q3_K *)vx + ib0;
float tmp = 0; // partial sum for thread in warp
#if QK_K == 256
const uint16_t kmask1 = 0x0303;
const uint16_t kmask2 = 0x0f0f;
const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1
const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop
const int step = 16/K_QUANTS_PER_ITERATION;
const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
const int in = tid - step*im; // 0....15 or 0...7
const uint8_t m = 1 << (4*im);
const int l0 = n*in; // 0...15 or 0...14 in steps of 2
const int q_offset = 32*im + l0;
const int y_offset = 128*im + l0;
uint16_t utmp[4];
const int8_t * s = (const int8_t *)utmp;
const uint16_t s_shift = 4*im;
for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
const float * y = yy + i * QK_K + y_offset;
const uint8_t * q = x[i].qs + q_offset;
const uint8_t * h = x[i].hmask + l0;
const uint16_t * a = (const uint16_t *)x[i].scales;
utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);
const float d = x[i].d;
float sum = 0;
for (int l = 0; l < n; ++l) {
sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
+ y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
+ y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
+ y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
+ y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
+ y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
+ y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
}
tmp += d * sum;
}
#else
const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7
const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3
const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14
const int in = offset/8; // 0 or 1
const int im = offset%8; // 0...7
for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
const float * y = yy + i * QK_K + offset;
const uint8_t * q = x[i].qs + offset;
const uint8_t * s = x[i].scales;
const float dall = (float)x[i].d;
float sum = 0;
for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
const uint8_t hl = x[i].hmask[im+l] >> in;
const uint8_t ql = q[l];
sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4))
+ y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4))
+ y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4))
+ y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4));
}
tmp += sum;
}
#endif
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[row] = tmp;
}
}
extern "C" __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {
const int row = blockIdx.x*blockDim.y + threadIdx.y;
if (row >= nrows) return;
const int num_blocks_per_row = ncols / QK_K;
const int ib0 = row*num_blocks_per_row;
const block_q4_K * x = (const block_q4_K *)vx + ib0;
#if QK_K == 256
const uint16_t kmask1 = 0x3f3f;
const uint16_t kmask2 = 0x0f0f;
const uint16_t kmask3 = 0xc0c0;
const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1
const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4
const int il = tid/step; // 0...3
const int ir = tid - step*il; // 0...7 or 0...3
const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4
const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
const int in = il%2;
const int l0 = n*(2*ir + in);
const int q_offset = 32*im + l0;
const int y_offset = 64*im + l0;
uint16_t aux[4];
const uint8_t * sc = (const uint8_t *)aux;
#if K_QUANTS_PER_ITERATION == 2
uint32_t q32[4];
const uint8_t * q4 = (const uint8_t *)q32;
#else
uint16_t q16[4];
const uint8_t * q4 = (const uint8_t *)q16;
#endif
float tmp = 0; // partial sum for thread in warp
for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
const float * y1 = yy + i*QK_K + y_offset;
const float * y2 = y1 + 128;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint16_t * a = (const uint16_t *)x[i].scales;
aux[0] = a[im+0] & kmask1;
aux[1] = a[im+2] & kmask1;
aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
#if K_QUANTS_PER_ITERATION == 2
const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset);
const uint32_t * q2 = q1 + 16;
q32[0] = q1[0] & 0x0f0f0f0f;
q32[1] = q1[0] & 0xf0f0f0f0;
q32[2] = q2[0] & 0x0f0f0f0f;
q32[3] = q2[0] & 0xf0f0f0f0;
float4 s = {0.f, 0.f, 0.f, 0.f};
float smin = 0;
for (int l = 0; l < 4; ++l) {
s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4];
s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12];
smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
}
tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#else
const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset);
const uint16_t * q2 = q1 + 32;
q16[0] = q1[0] & 0x0f0f;
q16[1] = q1[0] & 0xf0f0;
q16[2] = q2[0] & 0x0f0f;
q16[3] = q2[0] & 0xf0f0;
float4 s = {0.f, 0.f, 0.f, 0.f};
float smin = 0;
for (int l = 0; l < 2; ++l) {
s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2];
s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6];
smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
}
tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#endif
}
#else
const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15
const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION);
const int step = tid * K_QUANTS_PER_ITERATION;
uint16_t aux16[2];
const uint8_t * s = (const uint8_t *)aux16;
float tmp = 0;
for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
const uint8_t * q = x[i].qs + step;
const float * y = yy + i*QK_K + step;
const uint16_t * a = (const uint16_t *)x[i].scales;
aux16[0] = a[0] & 0x0f0f;
aux16[1] = (a[0] >> 4) & 0x0f0f;
const float d = (float)x[i].dm[0];
const float m = (float)x[i].dm[1];
float sum = 0.f;
for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2])
+ y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2])
+ y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3])
+ y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]);
}
tmp += sum;
}
#endif
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (tid == 0) {
dst[row] = tmp;
}
}
extern "C" __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) {
const int row = blockIdx.x;
const int num_blocks_per_row = ncols / QK_K;
const int ib0 = row*num_blocks_per_row;
const block_q5_K * x = (const block_q5_K *)vx + ib0;
float tmp = 0; // partial sum for thread in warp
#if QK_K == 256
const uint16_t kmask1 = 0x3f3f;
const uint16_t kmask2 = 0x0f0f;
const uint16_t kmask3 = 0xc0c0;
const int tid = threadIdx.x/2; // 0...15
const int ix = threadIdx.x%2;
const int il = tid/4; // 0...3
const int ir = tid - 4*il;// 0...3
const int n = 2;
const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
const int in = il%2;
const int l0 = n*(2*ir + in);
const int q_offset = 32*im + l0;
const int y_offset = 64*im + l0;
const uint8_t hm1 = 1 << (2*im);
const uint8_t hm2 = hm1 << 4;
uint16_t aux[4];
const uint8_t * sc = (const uint8_t *)aux;
uint16_t q16[8];
const uint8_t * q4 = (const uint8_t *)q16;
for (int i = ix; i < num_blocks_per_row; i += 2) {
const uint8_t * ql1 = x[i].qs + q_offset;
const uint8_t * qh = x[i].qh + l0;
const float * y1 = yy + i*QK_K + y_offset;
const float * y2 = y1 + 128;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const uint16_t * a = (const uint16_t *)x[i].scales;
aux[0] = a[im+0] & kmask1;
aux[1] = a[im+2] & kmask1;
aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
float4 sum = {0.f, 0.f, 0.f, 0.f};
float smin = 0;
const uint16_t * q1 = (const uint16_t *)ql1;
const uint16_t * q2 = q1 + 32;
q16[0] = q1[0] & 0x0f0f;
q16[1] = q1[8] & 0x0f0f;
q16[2] = (q1[0] >> 4) & 0x0f0f;
q16[3] = (q1[8] >> 4) & 0x0f0f;
q16[4] = q2[0] & 0x0f0f;
q16[5] = q2[8] & 0x0f0f;
q16[6] = (q2[0] >> 4) & 0x0f0f;
q16[7] = (q2[8] >> 4) & 0x0f0f;
for (int l = 0; l < n; ++l) {
sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0))
+ y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0));
sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0))
+ y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0));
sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0))
+ y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0));
sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0))
+ y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 16 : 0));
smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
+ (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
}
tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin;
}
#else
const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15
const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION);
const int step = tid * K_QUANTS_PER_ITERATION;
const int im = step/8;
const int in = step%8;
for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
const uint8_t * q = x[i].qs + step;
const int8_t * s = x[i].scales;
const float * y = yy + i*QK_K + step;
const float d = x[i].d;
float sum = 0.f;
for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
const uint8_t h = x[i].qh[in+j] >> im;
sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16))
+ y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16))
+ y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16))
+ y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16));
}
tmp += sum;
}
#endif
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (threadIdx.x == 0) {
dst[row] = tmp;
}
}
extern "C" __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {
static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");
const int row = blockIdx.x*blockDim.y + threadIdx.y;
if (row >= nrows) return;
const int num_blocks_per_row = ncols / QK_K;
const int ib0 = row*num_blocks_per_row;
const block_q6_K * x = (const block_q6_K *)vx + ib0;
#if QK_K == 256
const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1
const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8
const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
const int in = tid - step*im; // 0...15 or 0...7
#if K_QUANTS_PER_ITERATION == 1
const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15
const int is = 0;
#else
const int l0 = 4 * in; // 0, 4, 8, ..., 28
const int is = in / 4;
#endif
const int ql_offset = 64*im + l0;
const int qh_offset = 32*im + l0;
const int s_offset = 8*im + is;
const int y_offset = 128*im + l0;
float tmp = 0; // partial sum for thread in warp
for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
const float * y = yy + i * QK_K + y_offset;
const uint8_t * ql = x[i].ql + ql_offset;
const uint8_t * qh = x[i].qh + qh_offset;
const int8_t * s = x[i].scales + s_offset;
const float d = x[i].d;
#if K_QUANTS_PER_ITERATION == 1
float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
+ y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
+ y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
+ y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
+ y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32)
+ y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32)
+ y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
+y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32);
tmp += sum;
#else
float sum = 0;
for (int l = 0; l < 4; ++l) {
sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
+ y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
+ y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32)
+ y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32);
}
tmp += sum;
#endif
}
#else
const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7
const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3
const int step = tid * K_QUANTS_PER_ITERATION;
float tmp = 0; // partial sum for thread in warp
for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
const float * y = yy + i * QK_K + step;
const uint8_t * ql = x[i].ql + step;
const uint8_t * qh = x[i].qh + step;
const int8_t * s = x[i].scales;
const float d = x[i+0].d;
float sum = 0;
for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32)
+ y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32)
+ y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32)
+ y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32);
}
tmp += sum;
}
#endif
// sum up partial sums and write back result
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1) {
tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
}
if (tid == 0) {
dst[row] = tmp;
}
}
// VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called
// MMVQ = mul_mat_vec_q, MMQ = mul_mat_q
#define VDR_Q4_0_Q8_1_MMVQ 2
#define VDR_Q4_0_Q8_1_MMQ 4
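// Added worked example of the ratios above for q4_0: with the usual QK4_0 = 32
// this gives QI4_0 = 4, i.e. a q4_0 block packs its 32 nibbles into 4 ints.
// With VDR_Q4_0_Q8_1_MMVQ = 2 each vec_dot call therefore consumes 2 of those
// ints (16 nibbles), so qi/vdr = 2 threads cooperate on one q4_0 block inside
// mul_mat_vec_q further below.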
template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl(
const int * v, const int * u, const float & d4, const half2 & ds8) {
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
// SIMD dot product of quantized values
sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi);
sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi);
}
const float2 ds8f = __half22float2(ds8);
// second part effectively subtracts 8 from each quant value
return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y);
}
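// Added derivation for the correction term above: the exact per-pair product is
// d4*(q - 8) * d8*u, and ds8.y of a q8_1 block holds d8 times the sum of the
// block's quants (the s component written by q8_1 quantization), so the full
// "-8" correction for one block pair is 8 * d4 * ds8.y. Because QI4_0/vdr
// threads share that block, each call subtracts only its share,
// (8*vdr/QI4_0) * ds8.y, which is exactly the factor used in the return value.
// The q4_1/q5_1/q8_1 variants below apply the same idea to their m*sum(u) term.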
#define VDR_Q4_1_Q8_1_MMVQ 2
#define VDR_Q4_1_Q8_1_MMQ 4
template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl(
const int * v, const int * u, const half2 & dm4, const half2 & ds8) {
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
// SIMD dot product of quantized values
sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi);
sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi);
}
#ifdef GGML_CUDA_F16
const float2 tmp = __half22float2(__hmul2(dm4, ds8));
const float d4d8 = tmp.x;
const float m4s8 = tmp.y;
#else
const float2 dm4f = __half22float2(dm4);
const float2 ds8f = __half22float2(ds8);
const float d4d8 = dm4f.x * ds8f.x;
const float m4s8 = dm4f.y * ds8f.y;
#endif // GGML_CUDA_F16
// scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
}
#define VDR_Q5_0_Q8_1_MMVQ 2
#define VDR_Q5_0_Q8_1_MMQ 4
template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl(
const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) {
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values
int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
}
const float2 ds8f = __half22float2(ds8);
// second part effectively subtracts 16 from each quant value
return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y);
}
#define VDR_Q5_1_Q8_1_MMVQ 2
#define VDR_Q5_1_Q8_1_MMQ 4
template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl(
const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) {
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
sumi = ggml_cuda_dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values
int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
sumi = ggml_cuda_dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
}
#ifdef GGML_CUDA_F16
const float2 tmp = __half22float2(__hmul2(dm5, ds8));
const float d5d8 = tmp.x;
const float m5s8 = tmp.y;
#else
const float2 dm5f = __half22float2(dm5);
const float2 ds8f = __half22float2(ds8);
const float d5d8 = dm5f.x * ds8f.x;
const float m5s8 = dm5f.y * ds8f.y;
#endif // GGML_CUDA_F16
// scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it
return sumi*d5d8 + m5s8 / (QI5_1 / vdr);
}
#define VDR_Q8_0_Q8_1_MMVQ 2
#define VDR_Q8_0_Q8_1_MMQ 8
template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl(
const int * v, const int * u, const float & d8_0, const float & d8_1) {
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
// SIMD dot product of quantized values
sumi = ggml_cuda_dp4a(v[i], u[i], sumi);
}
return d8_0*d8_1 * sumi;
}
template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl(
const int * v, const int * u, const half2 & dm8, const half2 & ds8) {
int sumi = 0;
#pragma unroll
for (int i = 0; i < vdr; ++i) {
// SIMD dot product of quantized values
sumi = ggml_cuda_dp4a(v[i], u[i], sumi);
}
#ifdef GGML_CUDA_F16
const float2 tmp = __half22float2(__hmul2(dm8, ds8));
const float d8d8 = tmp.x;
const float m8s8 = tmp.y;
#else
const float2 dm8f = __half22float2(dm8);
const float2 ds8f = __half22float2(ds8);
const float d8d8 = dm8f.x * ds8f.x;
const float m8s8 = dm8f.y * ds8f.y;
#endif // GGML_CUDA_F16
// scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it
return sumi*d8d8 + m8s8 / (QI8_1 / vdr);
}
#define VDR_Q2_K_Q8_1_MMVQ 1
#define VDR_Q2_K_Q8_1_MMQ 2
// contiguous v/x values
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq(
const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
const half2 & dm2, const float * __restrict__ d8) {
float sumf_d = 0.0f;
float sumf_m = 0.0f;
#pragma unroll
for (int i = 0; i < QR2_K; ++i) {
const int sc = scales[2*i];
const int vi = (v >> (2*i)) & 0x03030303;
sumf_d += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product
// fill int with 4x m
int m = sc >> 4;
m |= m << 8;
m |= m << 16;
sumf_m += d8[i] * ggml_cuda_dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values
}
const float2 dm2f = __half22float2(dm2);
return dm2f.x*sumf_d - dm2f.y*sumf_m;
}
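// Added note on the "fill int with 4x m" trick above: replicating the 8-bit min
// into every byte lets a single dp4a compute m * (u0+u1+u2+u3). For example,
// m = 3 becomes 0x03030303, and ggml_cuda_dp4a(0x03030303, u[i], 0) returns
// 3 * (sum of the four signed q8 bytes in u[i]). The same trick reappears in
// vec_dot_q2_K_q8_1_impl_mmq below.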
// contiguous u/y values
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq(
const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
const half2 & dm2, const float & d8) {
int sumi_d = 0;
int sumi_m = 0;
#pragma unroll
for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) {
int sumi_d_sc = 0;
const int sc = scales[i0 / (QI8_1/2)];
// fill int with 4x m
int m = sc >> 4;
m |= m << 8;
m |= m << 16;
#pragma unroll
for (int i = i0; i < i0 + QI8_1/2; ++i) {
sumi_d_sc = ggml_cuda_dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product
sumi_m = ggml_cuda_dp4a(m, u[i], sumi_m); // multiply sum of q8_1 values with m
}
sumi_d += sumi_d_sc * (sc & 0xF);
}
const float2 dm2f = __half22float2(dm2);
return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m);
}
#define VDR_Q3_K_Q8_1_MMVQ 1
#define VDR_Q3_K_Q8_1_MMQ 2
// contiguous v/x values
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq(
const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales,
const int & scale_offset, const float & d3, const float * __restrict__ d8) {
float sumf = 0.0f;
#pragma unroll
for (int i = 0; i < QR3_K; ++i) {
const int isc = scale_offset + 2*i;
const int isc_low = isc % (QK_K/32);
const int sc_shift_low = 4 * (isc / (QK_K/32));
const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF;
const int isc_high = isc % (QK_K/64);
const int sc_shift_high = 2 * (isc / (QK_K/64));
const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4;
const int sc = (sc_low | sc_high) - 32;
const int vil = (vl >> (2*i)) & 0x03030303;
const int vih = ((vh >> i) << 2) & 0x04040404;
const int vi = __vsubss4(vil, vih);
sumf += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * sc); // SIMD dot product
}
return d3 * sumf;
}
// contiguous u/y values
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq(
const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales,
const float & d3, const float & d8) {
int sumi = 0;
#pragma unroll
for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) {
int sumi_sc = 0;
for (int i = i0; i < i0 + QI8_1/2; ++i) {
sumi_sc = ggml_cuda_dp4a(v[i], u[i], sumi_sc); // SIMD dot product
}
sumi += sumi_sc * scales[i0 / (QI8_1/2)];
}
return d3*d8 * sumi;
}
#define VDR_Q4_K_Q8_1_MMVQ 2
#define VDR_Q4_K_Q8_1_MMQ 8
// contiguous v/x values
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq(
const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) {
float sumf_d = 0.0f;
float sumf_m = 0.0f;
#pragma unroll
for (int i = 0; i < QR4_K; ++i) {
const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F;
const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F;
const int dot1 = ggml_cuda_dp4a(v1i, u[2*i+1], ggml_cuda_dp4a(v0i, u[2*i+0], 0)); // SIMD dot product
const int dot2 = ggml_cuda_dp4a(0x01010101, u[2*i+1], ggml_cuda_dp4a(0x01010101, u[2*i+0], 0)); // sum of u
sumf_d += d8[i] * (dot1 * sc[i]);
sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values
}
const float2 dm4f = __half22float2(dm4);
return dm4f.x*sumf_d - dm4f.y*sumf_m;
}
// contiguous u/y values
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq(
const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {
float sumf_d = 0.0f;
float sumf_m = 0.0f;
#pragma unroll
for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) {
int sumi_d = 0;
#pragma unroll
for (int j = 0; j < QI8_1; ++j) {
sumi_d = ggml_cuda_dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product
}
const float2 ds8f = __half22float2(ds8[i]);
sumf_d += ds8f.x * (sc[i] * sumi_d);
sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val
}
const float2 dm4f = __half22float2(dm4);
return dm4f.x*sumf_d - dm4f.y*sumf_m;
}
#define VDR_Q5_K_Q8_1_MMVQ 2
#define VDR_Q5_K_Q8_1_MMQ 8
// contiguous v/x values
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq(
const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc,
const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) {
float sumf_d = 0.0f;
float sumf_m = 0.0f;
#pragma unroll
for (int i = 0; i < QR5_K; ++i) {
const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F;
const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F;
const int vh0i = ((vh[0] >> i) << 4) & 0x10101010;
const int vh1i = ((vh[1] >> i) << 4) & 0x10101010;
const int v0i = vl0i | vh0i;
const int v1i = vl1i | vh1i;
const int dot1 = ggml_cuda_dp4a(v0i, u[2*i+0], ggml_cuda_dp4a(v1i, u[2*i+1], 0)); // SIMD dot product
const int dot2 = ggml_cuda_dp4a(0x01010101, u[2*i+0], ggml_cuda_dp4a(0x01010101, u[2*i+1], 0)); // sum of u
sumf_d += d8[i] * (dot1 * sc[i]);
sumf_m += d8[i] * (dot2 * m[i]);
}
const float2 dm5f = __half22float2(dm5);
return dm5f.x*sumf_d - dm5f.y*sumf_m;
}
// contiguous u/y values
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq(
const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {
float sumf_d = 0.0f;
float sumf_m = 0.0f;
#pragma unroll
for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) {
int sumi_d = 0;
#pragma unroll
for (int j = 0; j < QI8_1; ++j) {
sumi_d = ggml_cuda_dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product
}
const float2 ds8f = __half22float2(ds8[i]);
sumf_d += ds8f.x * (sc[i] * sumi_d);
sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val
}
const float2 dm4f = __half22float2(dm4);
return dm4f.x*sumf_d - dm4f.y*sumf_m;
}
#define VDR_Q6_K_Q8_1_MMVQ 1
#define VDR_Q6_K_Q8_1_MMQ 8
// contiguous v/x values
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq(
const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales,
const float & d, const float * __restrict__ d8) {
float sumf = 0.0f;
#pragma unroll
for (int i = 0; i < QR6_K; ++i) {
const int sc = scales[4*i];
const int vil = (vl >> (4*i)) & 0x0F0F0F0F;
const int vih = ((vh >> (4*i)) << 4) & 0x30303030;
const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32
sumf += d8[i] * (ggml_cuda_dp4a(vi, u[i], 0) * sc); // SIMD dot product
}
return d*sumf;
}
// contiguous u/y values
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq(
const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc,
const float & d6, const float * __restrict__ d8) {
float sumf_d = 0.0f;
#pragma unroll
for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) {
int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale
#pragma unroll
for (int i = i0; i < i0 + 2; ++i) {
sumi_d.x = ggml_cuda_dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product
sumi_d.x = ggml_cuda_dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product
sumi_d.y = ggml_cuda_dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product
sumi_d.y = ggml_cuda_dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product
}
sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y);
}
return d6 * sumf_d;
}
static __device__ __forceinline__ float vec_dot_q4_0_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;
int v[VDR_Q4_0_Q8_1_MMVQ];
int u[2*VDR_Q4_0_Q8_1_MMVQ];
#pragma unroll
for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) {
v[i] = get_int_from_uint8(bq4_0->qs, iqs + i);
u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0);
}
return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds);
}
static __device__ __forceinline__ float vec_dot_q4_1_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;
int v[VDR_Q4_1_Q8_1_MMVQ];
int u[2*VDR_Q4_1_Q8_1_MMVQ];
#pragma unroll
for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) {
v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i);
u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1);
}
return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds);
}
static __device__ __forceinline__ float vec_dot_q5_0_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;
int vl[VDR_Q5_0_Q8_1_MMVQ];
int vh[VDR_Q5_0_Q8_1_MMVQ];
int u[2*VDR_Q5_0_Q8_1_MMVQ];
#pragma unroll
for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) {
vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i);
vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i));
u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0);
}
return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds);
}
static __device__ __forceinline__ float vec_dot_q5_1_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;
int vl[VDR_Q5_1_Q8_1_MMVQ];
int vh[VDR_Q5_1_Q8_1_MMVQ];
int u[2*VDR_Q5_1_Q8_1_MMVQ];
#pragma unroll
for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) {
vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i);
vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i));
u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1);
}
return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds);
}
static __device__ __forceinline__ float vec_dot_q8_0_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
int v[VDR_Q8_0_Q8_1_MMVQ];
int u[VDR_Q8_0_Q8_1_MMVQ];
#pragma unroll
for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) {
v[i] = get_int_from_int8(bq8_0->qs, iqs + i);
u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
}
return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds));
}
static __device__ __forceinline__ float vec_dot_q2_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q2_K * bq2_K = (const block_q2_K *) vbq;
const int bq8_offset = QR2_K * (iqs / QI8_1);
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
const uint8_t * scales = bq2_K->scales + scale_offset;
const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs);
int u[QR2_K];
float d8[QR2_K];
#pragma unroll
for (int i = 0; i < QR2_K; ++ i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
d8[i] = __low2float(bq8_1[bq8_offset + i].ds);
}
return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8);
}
static __device__ __forceinline__ float vec_dot_q3_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q3_K * bq3_K = (const block_q3_K *) vbq;
const int bq8_offset = QR3_K * (iqs / (QI3_K/2));
const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
const float d = bq3_K->d;
const int vl = get_int_from_uint8(bq3_K->qs, iqs);
// invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset;
int u[QR3_K];
float d8[QR3_K];
#pragma unroll
for (int i = 0; i < QR3_K; ++i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
d8[i] = __low2float(bq8_1[bq8_offset + i].ds);
}
return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8);
}
static __device__ __forceinline__ float vec_dot_q4_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
#ifndef GGML_QKK_64
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
int v[2];
int u[2*QR4_K];
float d8[QR4_K];
// iqs is in 0,2..30. bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6
const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2));
// iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12
// iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44
// iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76
// iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108
const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
v[0] = q4[0];
v[1] = q4[4];
const uint16_t * scales = (const uint16_t *)bq4_K->scales;
uint16_t aux[2];
const int j = bq8_offset/2;
if (j < 2) {
aux[0] = scales[j+0] & 0x3f3f;
aux[1] = scales[j+2] & 0x3f3f;
} else {
aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
}
const uint8_t * sc = (const uint8_t *)aux;
const uint8_t * m = sc + 2;
for (int i = 0; i < QR4_K; ++i) {
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
d8[i] = __low2float(bq8i->ds);
const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
u[2*i+0] = q8[0];
u[2*i+1] = q8[4];
}
return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8);
#else
const block_q4_K * bq4_K = (const block_q4_K *) vbq;
float sumf_d = 0.0f;
float sumf_m = 0.0f;
uint16_t aux16[2];
const uint8_t * s = (const uint8_t *)aux16;
const uint16_t * a = (const uint16_t *)bq4_K->scales;
aux16[0] = a[0] & 0x0f0f;
aux16[1] = (a[0] >> 4) & 0x0f0f;
const float dall = bq4_K->dm[0];
const float dmin = bq4_K->dm[1];
const float d8_1 = __low2float(bq8_1[0].ds);
const float d8_2 = __low2float(bq8_1[1].ds);
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
const int * q4 = (const int *)bq4_K->qs + (iqs/2);
const int v1 = q4[0];
const int v2 = q4[4];
const int dot1 = ggml_cuda_dp4a(ui2, v2 & 0x0f0f0f0f, ggml_cuda_dp4a(ui1, v1 & 0x0f0f0f0f, 0));
const int dot2 = ggml_cuda_dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, ggml_cuda_dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0));
const int dot3 = ggml_cuda_dp4a(0x01010101, ui2, ggml_cuda_dp4a(0x01010101, ui1, 0));
const int dot4 = ggml_cuda_dp4a(0x01010101, ui4, ggml_cuda_dp4a(0x01010101, ui3, 0));
sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]);
sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]);
return dall * sumf_d - dmin * sumf_m;
#endif
}
static __device__ __forceinline__ float vec_dot_q5_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
#ifndef GGML_QKK_64
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
int vl[2];
int vh[2];
int u[2*QR5_K];
float d8[QR5_K];
const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2));
const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4));
vl[0] = ql[0];
vl[1] = ql[4];
vh[0] = qh[0] >> bq8_offset;
vh[1] = qh[4] >> bq8_offset;
const uint16_t * scales = (const uint16_t *)bq5_K->scales;
uint16_t aux[2];
const int j = bq8_offset/2;
if (j < 2) {
aux[0] = scales[j+0] & 0x3f3f;
aux[1] = scales[j+2] & 0x3f3f;
} else {
aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
}
const uint8_t * sc = (const uint8_t *)aux;
const uint8_t * m = sc + 2;
#pragma unroll
for (int i = 0; i < QR5_K; ++i) {
const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
d8[i] = __low2float(bq8i->ds);
const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
u[2*i+0] = q8[0];
u[2*i+1] = q8[4];
}
return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8);
#else
const block_q5_K * bq5_K = (const block_q5_K *) vbq;
const int8_t * s = bq5_K->scales;
const float d = bq5_K->d;
const float d8_1 = __low2half(bq8_1[0].ds);
const float d8_2 = __low2half(bq8_1[1].ds);
const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
const int * ql = (const int *)bq5_K->qs + (iqs/2);
const int vl1 = ql[0];
const int vl2 = ql[4];
const int step = 4 * (iqs/2); // 0, 4, 8, 12
const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6
const int in = step%8; // 0, 4, 0, 4
const int vh = (*((const int *)(bq5_K->qh + in))) >> im;
const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f);
const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f);
const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f);
const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f);
const float sumf_d = d8_1 * (ggml_cuda_dp4a(ui1, v1, 0) * s[0] + ggml_cuda_dp4a(ui2, v2, 0) * s[1])
+ d8_2 * (ggml_cuda_dp4a(ui3, v3, 0) * s[2] + ggml_cuda_dp4a(ui4, v4, 0) * s[3]);
return d * sumf_d;
#endif
}
static __device__ __forceinline__ float vec_dot_q6_K_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
const block_q6_K * bq6_K = (const block_q6_K *) vbq;
const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4);
const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8);
const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4));
const int vl = get_int_from_uint8(bq6_K->ql, iqs);
const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift;
const int8_t * scales = bq6_K->scales + scale_offset;
int u[QR6_K];
float d8[QR6_K];
#pragma unroll
for (int i = 0; i < QR6_K; ++i) {
u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1);
d8[i] = __low2float(bq8_1[bq8_offset + 2*i].ds);
}
return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8);
}
// https://github.com/ggerganov/llama.cpp/blob/c50a82ce0f71558cbb8e555146ba124251504b38/ggml-cuda/mmvq.cu#L4
typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs);
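// Generic quantized matrix-vector product: x is a quantized matrix, y an activation matrix
// quantized to q8_1. Each CUDA block computes rows_per_cuda_block rows of dst for all ncols_y
// columns of y; every thread accumulates partial dot products over the blocks of its rows,
// and the partials are then combined across warps via tmp_shared and a final warp_reduce_sum.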
template <int ncols_y, int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
static __device__ void mul_mat_vec_q(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3))
constexpr int nwarps = 1;
constexpr int rows_per_cuda_block = 1;
#else
constexpr int nwarps = ncols_y <= 4 ? 4 : 2;
constexpr int rows_per_cuda_block = ncols_y == 1 ? 1 : 2;
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3))
const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
const int row0 = rows_per_cuda_block*blockIdx.x;
const int blocks_per_row_x = ncols_x / qk;
const int blocks_per_col_y = nrows_y / QK8_1;
constexpr int blocks_per_iter = vdr * nwarps*WARP_SIZE / qi;
// partial sum for each thread
float tmp[ncols_y][rows_per_cuda_block] = {0.0f};
const block_q_t * x = (const block_q_t *) vx;
const block_q8_1 * y = (const block_q8_1 *) vy;
for (int kbx = tid / (qi/vdr); kbx < blocks_per_row_x; kbx += blocks_per_iter) {
const int kby = kbx * (qk/QK8_1); // y block index that aligns with kbx
// x block quant index when casting the quants to int
const int kqs = vdr * (tid % (qi/vdr));
#pragma unroll
for (int j = 0; j < ncols_y; ++j) {
#pragma unroll
for (int i = 0; i < rows_per_cuda_block; ++i) {
tmp[j][i] += vec_dot_q_cuda(
&x[kbx + (row0 + i)*blocks_per_row_x], &y[j*blocks_per_col_y + kby], kqs);
}
}
}
__shared__ float tmp_shared[nwarps-1 > 0 ? nwarps-1 : 1][ncols_y][rows_per_cuda_block][WARP_SIZE];
if (threadIdx.y > 0) {
#pragma unroll
for (int j = 0; j < ncols_y; ++j) {
#pragma unroll
for (int i = 0; i < rows_per_cuda_block; ++i) {
tmp_shared[threadIdx.y-1][j][i][threadIdx.x] = tmp[j][i];
}
}
}
__syncthreads();
if (threadIdx.y > 0) {
return;
}
// sum up partial sums and write back result
#pragma unroll
for (int j = 0; j < ncols_y; ++j) {
#pragma unroll
for (int i = 0; i < rows_per_cuda_block; ++i) {
#pragma unroll
for (int l = 0; l < nwarps-1; ++l) {
tmp[j][i] += tmp_shared[l][j][i][threadIdx.x];
}
tmp[j][i] = warp_reduce_sum(tmp[j][i]);
}
if (threadIdx.x < rows_per_cuda_block) {
dst[j*nrows_dst + row0 + threadIdx.x] = tmp[j][threadIdx.x];
}
}
}
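// The wrappers below instantiate mul_mat_vec_q once per quantization type; the trailing number
// in the kernel name (cuda1 ... cuda8) is the ncols_y template argument, i.e. the number of
// columns of y (the batch size) handled by a single launch.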
// batch size = 1
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda1(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<1, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
// batch size = 2
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda2(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<2, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
// batch size = 3
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda3(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<3, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
// batch size = 4
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda4(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<4, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
// batch size = 5
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda5(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<5, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
// batch size = 6
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda6(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<6, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
// batch size = 7
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda7(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<7, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
// batch size = 8
extern "C" __global__ void mul_mat_vec_q4_0_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_1_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_0_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_1_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q8_0_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q2_K_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q3_K_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q4_K_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q5_K_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
extern "C" __global__ void mul_mat_vec_q6_K_q8_1_cuda8(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst) {
mul_mat_vec_q<8, QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
(vx, vy, dst, ncols_x, nrows_x, nrows_y, nrows_dst);
}
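// Quantizes a row-major float matrix to q8_1: each group of QK8_1 values stores its int8 quants
// plus a half2 ds holding the scale d = amax/127 and the group sum (the sum is needed by the
// *_1 quant types, which carry an additive minimum).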
extern "C" __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) {
const int ix = blockDim.x*blockIdx.x + threadIdx.x;
if (ix >= kx_padded) {
return;
}
const int iy = blockDim.y*blockIdx.y + threadIdx.y;
const int i_padded = iy*kx_padded + ix;
block_q8_1 * y = (block_q8_1 *) vy;
const int ib = i_padded / QK8_1; // block index
const int iqs = i_padded % QK8_1; // quant index
const float xi = ix < kx ? x[iy*kx + ix] : 0.0f;
float amax = fabsf(xi);
float sum = xi;
amax = warp_reduce_max(amax);
sum = warp_reduce_sum(sum);
const float d = amax / 127;
const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);
y[ib].qs[iqs] = q;
if (iqs > 0) {
return;
}
reinterpret_cast<half&>(y[ib].ds.x) = d;
reinterpret_cast<half&>(y[ib].ds.y) = sum;
}
// Kernels from https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda/mmq.cu
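// Each quant type contributes three helpers consumed by the mul_mat_q wrappers at the end of
// this section: allocate_tiles_* reserves shared memory for an mmq_y x WARP_SIZE tile of
// unpacked quants plus the per-block scales, load_tiles_* fills those tiles from global memory,
// and vec_dot_*_mul_mat computes one thread's dot product against the q8_1 tile of y.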
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
__shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0];
*x_ql = tile_x_ql;
*x_dm = (half2 *) tile_x_d;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI5_0;
const int kqsx = k % QI5_0;
const block_q5_0 * bx0 = (const block_q5_0 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx;
const int ql = get_int_from_uint8(bxi->qs, kqsx);
const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0));
int qs0 = (ql >> 0) & 0x0F0F0F0F;
qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
qs0 = __vsubss4(qs0, 0x10101010); // subtract 16
x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
int qs1 = (ql >> 4) & 0x0F0F0F0F;
qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
qs1 = __vsubss4(qs1, 0x10101010); // subtract 16
x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
const int kbxd = k % blocks_per_tile_x_row;
float * x_dmf = (float *) x_dm;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) {
int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d;
}
}
static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0;
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
int u[2*VDR_Q5_0_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE];
}
return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ>
(&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
__shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1];
*x_ql = tile_x_ql;
*x_dm = tile_x_dm;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI5_1;
const int kqsx = k % QI5_1;
const block_q5_1 * bx0 = (const block_q5_1 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx;
const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1));
int qs0 = (ql >> 0) & 0x0F0F0F0F;
qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
int qs1 = (ql >> 4) & 0x0F0F0F0F;
qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
const int kbxd = k % blocks_per_tile_x_row;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) {
int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm;
}
}
static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1;
int u[2*VDR_Q5_1_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE];
}
return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ>
(&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
__shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0];
*x_ql = tile_x_qs;
*x_dm = (half2 *) tile_x_d;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI8_0;
const int kqsx = k % QI8_0;
float * x_dmf = (float *) x_dm;
const block_q8_0 * bx0 = (const block_q8_0 *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
const int kbxd = k % blocks_per_tile_x_row;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) {
int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row;
if (need_check) {
i = min(i, i_max);
}
const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d;
}
}
static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ>
(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0],
y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]);
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh);
__shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K];
__shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
*x_ql = tile_x_ql;
*x_dm = tile_x_dm;
*x_sc = tile_x_sc;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI2_K;
const int kqsx = k % QI2_K;
const block_q2_K * bx0 = (const block_q2_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
const int kbxd = k % blocks_per_tile_x_row;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
if (need_check) {
i = min(i, i_max);
}
const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4);
x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4));
}
}
static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh);
const int kbx = k / QI2_K;
const int ky = (k % QI2_K) * QR2_K;
const float * y_df = (const float *) y_ds;
int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];
const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2));
#pragma unroll
for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303;
}
const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;
const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE;
return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]);
}
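// q3_K keeps its high-bit mask in a separate x_qh tile (inverted in load_tiles_q3_K so that a
// cleared hmask bit ends up subtracting 4); q5_K and q6_K instead fold the high bits into x_ql
// while loading.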
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
__shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K];
__shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2];
__shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
*x_ql = tile_x_ql;
*x_dm = tile_x_dm;
*x_qh = tile_x_qh;
*x_sc = tile_x_sc;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI3_K;
const int kqsx = k % QI3_K;
const block_q3_K * bx0 = (const block_q3_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
const int kbxd = k % blocks_per_tile_x_row;
float * x_dmf = (float *) x_dm;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) {
int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
int i = i0 + i_offset * 2 + k / (WARP_SIZE/2);
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2);
// invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2));
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
if (need_check) {
i = min(i, i_max);
}
const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4);
const int ksc = k % (QI3_K/4);
const int ksc_low = ksc % (QI3_K/8);
const int shift_low = 4 * (ksc / (QI3_K/8));
const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
const int ksc_high = QI3_K/8;
const int shift_high = 2 * ksc;
const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
const int sc = __vsubss4(sc_low | sc_high, 0x20202020);
x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc;
}
}
static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
const int kbx = k / QI3_K;
const int ky = (k % QI3_K) * QR3_K;
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
const int shift = 2 * ((ky % 32) / 8);
const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303;
const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
const int vlh = (vh << 2) & 0x04040404;
v[l] = __vsubss4(vll, vlh);
}
const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE;
return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]);
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh);
__shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K];
__shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_ql = tile_x_ql;
*x_dm = tile_x_dm;
*x_sc = tile_x_sc;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI4_K; // == 0 if QK_K == 256
const int kqsx = k % QI4_K; // == k if QK_K == 256
const block_q4_K * bx0 = (const block_q4_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx;
x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) {
int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd;
#if QK_K == 256
x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm;
#else
x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]};
#endif
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8);
const int * scales = (const int *) bxi->scales;
const int ksc = k % (WARP_SIZE/8);
// scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
}
}
static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh);
const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8);
const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE;
return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8,
x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]);
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh);
__shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K];
__shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_ql = tile_x_ql;
*x_dm = tile_x_dm;
*x_sc = tile_x_sc;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI5_K; // == 0 if QK_K == 256
const int kqsx = k % QI5_K; // == k if QK_K == 256
const block_q5_K * bx0 = (const block_q5_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx;
const int ky = QR5_K*kqsx;
const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
const int ql0 = (ql >> 0) & 0x0F0F0F0F;
const int ql1 = (ql >> 4) & 0x0F0F0F0F;
const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;
const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0;
const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4);
x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) {
int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd;
#if QK_K == 256
x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm;
#endif
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8);
const int * scales = (const int *) bxi->scales;
const int ksc = k % (WARP_SIZE/8);
// scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
}
}
static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh);
const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8);
const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k;
const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE;
return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8,
x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]);
}
template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
GGML_UNUSED(x_qh);
__shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
__shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K];
__shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
*x_ql = tile_x_ql;
*x_dm = tile_x_dm;
*x_sc = tile_x_sc;
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K(
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
GGML_UNUSED(x_qh);
GGML_CUDA_ASSUME(i_offset >= 0);
GGML_CUDA_ASSUME(i_offset < nwarps);
GGML_CUDA_ASSUME(k >= 0);
GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI6_K; // == 0 if QK_K == 256
const int kqsx = k % QI6_K; // == k if QK_K == 256
const block_q6_K * bx0 = (const block_q6_K *) vx;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
int i = i0 + i_offset;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx;
const int ky = QR6_K*kqsx;
const int ql = get_int_from_uint8(bxi->ql, kqsx);
const int ql0 = (ql >> 0) & 0x0F0F0F0F;
const int ql1 = (ql >> 4) & 0x0F0F0F0F;
const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030;
const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0;
const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2);
x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020);
x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256
const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
float * x_dmf = (float *) x_dm;
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) {
int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd;
x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
if (need_check) {
i = min(i, i_max);
}
const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4;
x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8));
}
}
static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh);
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]);
const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k;
const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE;
return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]);
}
static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
const float * x_dmf = (const float *) x_dm;
int u[2*VDR_Q4_0_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE];
}
return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ>
(&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0],
y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat(
const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
int u[2*VDR_Q4_1_Q8_1_MMQ];
#pragma unroll
for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) {
u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE];
}
return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ>
(&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1],
y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
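// The mul_mat_* entry points below instantiate the generic mul_mat_q template with the
// Ampere tile configuration (MMQ_X_*, MMQ_Y_*, NWARPS_*) and the tile-loader / dot-product
// helpers matching each quantization format.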
extern "C" __global__ void
mul_mat_q4_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q4_0_AMPERE;
const int mmq_y = MMQ_Y_Q4_0_AMPERE;
const int nwarps = NWARPS_Q4_0_AMPERE;
mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
load_tiles_q4_0<mmq_y, nwarps, true>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q4_1(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q4_1_AMPERE;
const int mmq_y = MMQ_Y_Q4_1_AMPERE;
const int nwarps = NWARPS_Q4_1_AMPERE;
mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
load_tiles_q4_1<mmq_y, nwarps, true>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q5_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q5_0_AMPERE;
const int mmq_y = MMQ_Y_Q5_0_AMPERE;
const int nwarps = NWARPS_Q5_0_AMPERE;
mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
load_tiles_q5_0<mmq_y, nwarps, true>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q5_1(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q5_1_AMPERE;
const int mmq_y = MMQ_Y_Q5_1_AMPERE;
const int nwarps = NWARPS_Q5_1_AMPERE;
mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
load_tiles_q5_1<mmq_y, nwarps, true>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q8_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q8_0_AMPERE;
const int mmq_y = MMQ_Y_Q8_0_AMPERE;
const int nwarps = NWARPS_Q8_0_AMPERE;
mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
load_tiles_q8_0<mmq_y, nwarps, true>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q2_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q2_K_AMPERE;
const int mmq_y = MMQ_Y_Q2_K_AMPERE;
const int nwarps = NWARPS_Q2_K_AMPERE;
mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
load_tiles_q2_K<mmq_y, nwarps, true>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q3_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q3_K_AMPERE;
const int mmq_y = MMQ_Y_Q3_K_AMPERE;
const int nwarps = NWARPS_Q3_K_AMPERE;
mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
load_tiles_q3_K<mmq_y, nwarps, true>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q4_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q4_K_AMPERE;
const int mmq_y = MMQ_Y_Q4_K_AMPERE;
const int nwarps = NWARPS_Q4_K_AMPERE;
mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
load_tiles_q4_K<mmq_y, nwarps, true>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q5_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q5_K_AMPERE;
const int mmq_y = MMQ_Y_Q5_K_AMPERE;
const int nwarps = NWARPS_Q5_K_AMPERE;
mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
load_tiles_q5_K<mmq_y, nwarps, true>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
extern "C" __global__ void
mul_mat_q6_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
const int mmq_x = MMQ_X_Q6_K_AMPERE;
const int mmq_y = MMQ_Y_Q6_K_AMPERE;
const int nwarps = NWARPS_Q6_K_AMPERE;
mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
load_tiles_q6_K<mmq_y, nwarps, true>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
| 1 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/cast.cu | #include "cuda_utils.cuh"
#include<stdint.h>
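// `info` packs the tensor shape followed by the strides (`num_dims` entries each).
// A null `info` pointer or contiguous strides selects the fast path below.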
template <typename S, typename T>
__device__ void cast_(
const size_t numel,
const size_t num_dims,
const size_t *info,
const S *inp,
T *out
) {
const size_t *dims = info;
const size_t *strides = info + num_dims;
if (info == nullptr || is_contiguous(num_dims, dims, strides)) {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
out[i] = inp[i];
}
}
else {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
unsigned strided_i = get_strided_index(i, num_dims, dims, strides);
out[i] = inp[strided_i];
}
}
}
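// Same as cast_, but converts through an intermediate type `I`. This is used for pairs
// with no direct device-side conversion, e.g. bf16 <-> f16 which go through float.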
template <typename S, typename T, typename I>
__device__ void cast_through(
const size_t numel,
const size_t num_dims,
const size_t *info,
const S *inp,
T *out
) {
const size_t *dims = info;
const size_t *strides = info + num_dims;
if (info == nullptr || is_contiguous(num_dims, dims, strides)) {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
out[i] = static_cast<T>(static_cast<I>(inp[i]));
}
}
else {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
unsigned strided_i = get_strided_index(i, num_dims, dims, strides);
out[i] = static_cast<T>(static_cast<I>(inp[strided_i]));
}
}
}
#define CAST_OP(SRC_TYPENAME, DST_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const SRC_TYPENAME *inp, \
DST_TYPENAME *out \
) { \
cast_<SRC_TYPENAME, DST_TYPENAME>(numel, num_dims, info, inp, out); \
} \
#define CAST_THROUGH_OP(SRC_TYPENAME, DST_TYPENAME, INT_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const SRC_TYPENAME *inp, \
DST_TYPENAME *out \
) { \
cast_through<SRC_TYPENAME, DST_TYPENAME, INT_TYPENAME>(numel, num_dims, info, inp, out); \
} \
#if __CUDA_ARCH__ >= 800
CAST_OP(__nv_bfloat16, __nv_bfloat16, cast_bf16_bf16)
CAST_OP(__nv_bfloat16, uint32_t, cast_bf16_u32)
CAST_OP(__nv_bfloat16, float, cast_bf16_f32)
CAST_OP(__nv_bfloat16, double, cast_bf16_f64)
CAST_OP(uint8_t, __nv_bfloat16, cast_u8_bf16)
CAST_OP(uint32_t, __nv_bfloat16, cast_u32_bf16)
CAST_OP(float, __nv_bfloat16, cast_f32_bf16)
CAST_OP(double, __nv_bfloat16, cast_f64_bf16)
CAST_THROUGH_OP(__nv_bfloat16, uint8_t, float, cast_bf16_u8)
CAST_THROUGH_OP(__nv_bfloat16, __half, float, cast_bf16_f16)
CAST_THROUGH_OP(__half, __nv_bfloat16, float, cast_f16_bf16)
#else
#include <cuda.h>
#if CUDA_VERSION >= 11000
CAST_OP(__nv_bfloat16, float, cast_bf16_f32)
CAST_OP(float, __nv_bfloat16, cast_f32_bf16)
CAST_THROUGH_OP(__nv_bfloat16, uint8_t, float, cast_bf16_u8)
CAST_THROUGH_OP(__nv_bfloat16, __half, float, cast_bf16_f16)
CAST_THROUGH_OP(__nv_bfloat16, double, float, cast_bf16_f64)
CAST_THROUGH_OP(__half, __nv_bfloat16, float, cast_f16_bf16)
CAST_THROUGH_OP(double, __nv_bfloat16, float, cast_f64_bf16)
CAST_THROUGH_OP(uint8_t, __nv_bfloat16, float, cast_u8_bf16)
#endif
#endif
#if __CUDA_ARCH__ >= 530
CAST_OP(__half, __half, cast_f16_f16)
CAST_THROUGH_OP(__half, uint8_t, float, cast_f16_u8)
CAST_OP(__half, uint32_t, cast_f16_u32)
CAST_OP(__half, float, cast_f16_f32)
CAST_OP(__half, double, cast_f16_f64)
CAST_OP(uint8_t, __half, cast_u8_f16 )
CAST_OP(uint32_t, __half, cast_u32_f16)
CAST_OP(float, __half, cast_f32_f16)
CAST_OP(double, __half, cast_f64_f16)
#endif
CAST_OP(uint32_t, uint32_t, cast_u32_u32)
CAST_OP(uint32_t, uint8_t, cast_u32_u8 )
CAST_OP(uint32_t, int64_t, cast_u32_i64 )
CAST_OP(uint32_t, float, cast_u32_f32)
CAST_OP(uint32_t, double, cast_u32_f64)
CAST_OP(uint8_t, uint32_t, cast_u8_u32)
CAST_OP(uint8_t, uint8_t, cast_u8_u8 )
CAST_OP(uint8_t, int64_t, cast_u8_i64 )
CAST_OP(uint8_t, float, cast_u8_f32)
CAST_OP(uint8_t, double, cast_u8_f64)
CAST_OP(int64_t, uint32_t, cast_i64_u32)
CAST_OP(int64_t, uint8_t, cast_i64_u8 )
CAST_OP(int64_t, int64_t, cast_i64_i64 )
CAST_OP(int64_t, float, cast_i64_f32)
CAST_OP(int64_t, double, cast_i64_f64)
CAST_OP(float, uint8_t, cast_f32_u8 )
CAST_OP(float, uint32_t, cast_f32_u32)
CAST_OP(float, int64_t, cast_f32_i64 )
CAST_OP(float, float, cast_f32_f32)
CAST_OP(float, double, cast_f32_f64)
CAST_OP(double, uint8_t, cast_f64_u8 )
CAST_OP(double, uint32_t, cast_f64_u32)
CAST_OP(double, int64_t, cast_f64_i64 )
CAST_OP(double, float, cast_f64_f32)
CAST_OP(double, double, cast_f64_f64)
| 2 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/ternary.cu | #include "cuda_utils.cuh"
#include<stdint.h>
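// `info` layout for WHERE_OP: dims, then the strides of `ids`, `t` and `f` (num_dims entries each).
// Computes out[i] = ids[i] ? t[i] : f[i], using strided indexing when any input is non-contiguous.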
#define WHERE_OP(TYPENAME, ID_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const ID_TYPENAME *ids, \
const TYPENAME *t, \
const TYPENAME *f, \
TYPENAME *out \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
const size_t *strides_t = info + 2*num_dims; \
const size_t *strides_f = info + 3*num_dims; \
if (is_contiguous(num_dims, dims, strides) \
&& is_contiguous(num_dims, dims, strides_f) \
&& is_contiguous(num_dims, dims, strides_t)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
out[i] = ids[i] ? t[i] : f[i]; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
unsigned strided_i_t = get_strided_index(i, num_dims, dims, strides_t); \
unsigned strided_i_f = get_strided_index(i, num_dims, dims, strides_f); \
out[i] = ids[strided_i] ? t[strided_i_t] : f[strided_i_f]; \
} \
} \
} \
#if __CUDA_ARCH__ >= 800
WHERE_OP(__nv_bfloat16, int64_t, where_i64_bf16)
WHERE_OP(__nv_bfloat16, uint32_t, where_u32_bf16)
WHERE_OP(__nv_bfloat16, uint8_t, where_u8_bf16)
#endif
#if __CUDA_ARCH__ >= 530
WHERE_OP(__half, int64_t, where_i64_f16)
WHERE_OP(__half, uint32_t, where_u32_f16)
WHERE_OP(__half, uint8_t, where_u8_f16)
#endif
WHERE_OP(float, int64_t, where_i64_f32)
WHERE_OP(double, int64_t, where_i64_f64)
WHERE_OP(uint8_t, int64_t, where_i64_u8)
WHERE_OP(uint32_t, int64_t, where_i64_u32)
WHERE_OP(int64_t, int64_t, where_i64_i64)
WHERE_OP(float, uint32_t, where_u32_f32)
WHERE_OP(double, uint32_t, where_u32_f64)
WHERE_OP(uint8_t, uint32_t, where_u32_u8)
WHERE_OP(uint32_t, uint32_t, where_u32_u32)
WHERE_OP(int64_t, uint32_t, where_u32_i64)
WHERE_OP(float, uint8_t, where_u8_f32)
WHERE_OP(double, uint8_t, where_u8_f64)
WHERE_OP(uint8_t, uint8_t, where_u8_u8)
WHERE_OP(uint32_t, uint8_t, where_u8_u32)
WHERE_OP(int64_t, uint8_t, where_u8_i64)
| 3 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/compatibility.cuh | #include "cuda_fp16.h"
#include "cuda_bf16.h"
// Table showing which features are supported on which compute capability
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications
// FIXME: the minimum compute capabilities are just guesses since the table is not specific enough
#if (__CUDACC_VER_MAJOR__ < 12 || (__CUDACC_VER_MAJOR__ == 12 && __CUDACC_VER_MINOR__ < 2)) && __CUDA_ARCH__ < 800
__device__ __forceinline__ __half __hmax_nan(__half a, __half b) {
return __hisnan(a) ? a : (__hisnan(b) ? b : __hmax(a, b));
}
__device__ __forceinline__ __half __hmin_nan(__half a, __half b) {
return __hisnan(a) ? a : (__hisnan(b) ? b : __hmin(a, b));
}
#endif
#if __CUDA_ARCH__ < 600
// Copied from https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
#if __CUDA_ARCH__ < 700
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicadd
// The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
// Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119
__device__ __half atomicAdd(__half *address, __half val) {
    // Emulate a 16-bit atomic add with a CAS loop on the 32-bit word containing the half.
    unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
    unsigned int old = *address_as_ui;
    unsigned int assumed;
    bool unaligned = (size_t) address & 2;
    do {
        assumed = old;
        unsigned int hsum;
        hsum = unaligned ? (old >> 16) : (old & 0xffff);
        hsum = __half_as_ushort(__ushort_as_half(hsum) + val);
        old = atomicCAS(address_as_ui, assumed,
            unaligned ? (old & 0xffff) | (hsum << 16) : (old & 0xffff0000) | hsum
        );
    } while (assumed != old);
    return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff));
}
#endif
__device__ __forceinline__ __half atomicMaxf(__half* address, __half val) {
#if __CUDA_ARCH__ < 700
// On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery.
// Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119
unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
unsigned int old = *address_as_ui;
unsigned int assumed;
bool unaligned = (size_t) address & 2;
do {
assumed = old;
unsigned int hmax;
hmax = unaligned ? (old >> 16) : (old & 0xffff);
hmax = __half_as_ushort(__hmax_nan(val, __ushort_as_half(hmax)));
old = atomicCAS(address_as_ui, assumed,
unaligned ? (old & 0xffff) | (hmax << 16) : (old & 0xffff0000) | hmax
);
} while (assumed != old);
return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff));
#else
// Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
unsigned short int* casted_address = (unsigned short int*)address;
unsigned short int old = *casted_address;
unsigned short int assumed;
do {
assumed = old;
old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmax_nan(val, __ushort_as_half(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __ushort_as_half(old);
#endif
}
// atomicMax is not implemented for floats,
// solution copied from https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda
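// For non-negative floats the IEEE-754 bit pattern increases monotonically when read as a
// signed int, so atomicMax on the int bits matches a float max; for negative values the raw
// bit ordering is reversed, hence atomicMin on the unsigned bits instead.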
__device__ __forceinline__ float atomicMaxf(float * addr, float value) {
if (signbit(value)) {
return __uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value)));
} else {
return __int_as_float(atomicMax((int *)addr, __float_as_int(value)));
}
}
__device__ __forceinline__ double atomicMaxf(double * addr, double value) {
if (signbit(value)) {
return __longlong_as_double(atomicMin((unsigned long long int *)addr, __double_as_longlong(value)));
} else {
return __longlong_as_double(atomicMax((long long int *)addr, __double_as_longlong(value)));
}
}
__device__ __forceinline__ __half atomicMinf(__half* address, __half val) {
#if __CUDA_ARCH__ < 700
// On older GPUs we do not have access to atomicCAS for shorts, so we have to do some trickery.
// Solution adapted from https://github.com/torch/cutorch/blob/master/lib/THC/THCAtomics.cuh#L96-L119
unsigned int *address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
unsigned int old = *address_as_ui;
unsigned int assumed;
bool unaligned = (size_t) address & 2;
do {
assumed = old;
unsigned int hmin;
hmin = unaligned ? (old >> 16) : (old & 0xffff);
hmin = __half_as_ushort(__hmin_nan(val, __ushort_as_half(hmin)));
old = atomicCAS(address_as_ui, assumed,
unaligned ? (old & 0xffff) | (hmin << 16) : (old & 0xffff0000) | hmin
);
} while (assumed != old);
return __ushort_as_half(unaligned ? (old >> 16) : (old & 0xffff));
#else
// Based on https://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
unsigned short int* casted_address = (unsigned short int*)address;
unsigned short int old = *casted_address;
unsigned short int assumed;
do {
assumed = old;
old = atomicCAS(casted_address, assumed, __half_as_ushort(__hmin_nan(val, __ushort_as_half(assumed))));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __ushort_as_half(old);
#endif
}
// atomicMin is not implemented for floats,
// solution copied from https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda
__device__ __forceinline__ float atomicMinf(float * addr, float value) {
if (signbit(value)) {
return __uint_as_float(atomicMax((unsigned int *)addr, __float_as_uint(value)));
} else {
return __int_as_float(atomicMin((int *)addr, __float_as_int(value)));
}
}
__device__ __forceinline__ double atomicMinf(double * addr, double value) {
if (signbit(value)) {
return __longlong_as_double(atomicMax((unsigned long long int *)addr, __double_as_longlong(value)));
} else {
return __longlong_as_double(atomicMin((long long int *)addr, __double_as_longlong(value)));
}
}
| 4 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/indexing.cu | // WARNING: THIS IS ONLY VALID ASSUMING THAT inp IS CONTIGUOUS!
// TODO: proper error reporting when ids are larger than v_size.
#include "cuda_utils.cuh"
#include<stdint.h>
template<typename T, typename I>
__device__ void index_select(
const size_t numel,
const size_t num_dims,
const size_t *info,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
const size_t *dims = info;
const size_t *strides = info + num_dims;
bool b = is_contiguous(num_dims, dims, strides);
for (unsigned int dst_i = blockIdx.x * blockDim.x + threadIdx.x; dst_i < numel; dst_i += blockDim.x * gridDim.x) {
unsigned int left_i = dst_i / (ids_dim_size * right_size);
unsigned int id_i = dst_i / right_size % ids_dim_size;
unsigned int right_i = dst_i % right_size;
unsigned int src_i = left_i * (src_dim_size * right_size) + ids[id_i] * right_size + right_i;
unsigned strided_i = b ? src_i : get_strided_index(src_i, num_dims, dims, strides);
out[dst_i] = inp[strided_i];
}
}
#define IS_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { index_select(numel, num_dims, info, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); } \
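// gather: out[pre, i, post] = inp[pre, ids[pre, i, post], post] along the indexed dimension.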
template<typename T, typename I>
__device__ void gather(
const size_t numel,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
size_t post = i % right_size;
size_t idx = ids[i];
size_t pre = i / (right_size * ids_dim_size);
size_t src_i = (pre * src_dim_size + idx) * right_size + post;
out[i] = inp[src_i];
}
}
#define GATHER_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { gather(numel, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); } \
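// index_add: out[pre, ids[j], post] += inp[pre, j, post] for every j in the index vector.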
template<typename T, typename I>
__device__ void index_add(
const I *ids,
const size_t ids_dim_size,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const size_t idx = ids[j];
const size_t src_i = (pre * ids_dim_size + j) * right_size + post;
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
#define IA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const size_t ids_dim_size, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { index_add(ids, ids_dim_size, inp, out, left_size, src_dim_size, dst_dim_size, right_size); } \
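// scatter_add: out[pre, ids[pre, j, post], post] += inp[pre, j, post], one index per source element.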
template<typename T, typename I>
__device__ void scatter_add(
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (pre * src_dim_size + j) * right_size + post;
const size_t idx = ids[src_i];
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
#define SA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { scatter_add(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); } \
#if __CUDA_ARCH__ >= 800
IS_OP(__nv_bfloat16, int64_t, is_i64_bf16)
IS_OP(__nv_bfloat16, uint32_t, is_u32_bf16)
IS_OP(__nv_bfloat16, uint8_t, is_u8_bf16)
GATHER_OP(__nv_bfloat16, int64_t, gather_i64_bf16)
GATHER_OP(__nv_bfloat16, uint32_t, gather_u32_bf16)
GATHER_OP(__nv_bfloat16, uint8_t, gather_u8_bf16)
IA_OP(__nv_bfloat16, int64_t, ia_i64_bf16)
IA_OP(__nv_bfloat16, uint32_t, ia_u32_bf16)
IA_OP(__nv_bfloat16, uint8_t, ia_u8_bf16)
SA_OP(__nv_bfloat16, int64_t, sa_i64_bf16)
SA_OP(__nv_bfloat16, uint32_t, sa_u32_bf16)
SA_OP(__nv_bfloat16, uint8_t, sa_u8_bf16)
#endif
#if __CUDA_ARCH__ >= 530
IS_OP(__half, int64_t, is_i64_f16)
IS_OP(__half, uint32_t, is_u32_f16)
IS_OP(__half, uint8_t, is_u8_f16)
GATHER_OP(__half, int64_t, gather_i64_f16)
GATHER_OP(__half, uint32_t, gather_u32_f16)
GATHER_OP(__half, uint8_t, gather_u8_f16)
IA_OP(__half, int64_t, ia_i64_f16)
IA_OP(__half, uint32_t, ia_u32_f16)
IA_OP(__half, uint8_t, ia_u8_f16)
SA_OP(__half, int64_t, sa_i64_f16)
SA_OP(__half, uint32_t, sa_u32_f16)
SA_OP(__half, uint8_t, sa_u8_f16)
#endif
IS_OP(float, int64_t, is_i64_f32)
IS_OP(double, int64_t, is_i64_f64)
IS_OP(uint8_t, int64_t, is_i64_u8)
IS_OP(uint32_t, int64_t, is_i64_u32)
IS_OP(int64_t, int64_t, is_i64_i64)
IS_OP(float, uint32_t, is_u32_f32)
IS_OP(double, uint32_t, is_u32_f64)
IS_OP(uint8_t, uint32_t, is_u32_u8)
IS_OP(int64_t, uint32_t, is_u32_i64)
IS_OP(uint32_t, uint32_t, is_u32_u32)
IS_OP(float, uint8_t, is_u8_f32)
IS_OP(double, uint8_t, is_u8_f64)
IS_OP(uint8_t, uint8_t, is_u8_u8)
IS_OP(uint32_t, uint8_t, is_u8_u32)
IS_OP(int64_t, uint8_t, is_u8_i64)
GATHER_OP(float, int64_t, gather_i64_f32)
GATHER_OP(double, int64_t, gather_i64_f64)
GATHER_OP(uint8_t, int64_t, gather_i64_u8)
GATHER_OP(uint32_t, int64_t, gather_i64_u32)
GATHER_OP(int64_t, int64_t, gather_i64_i64)
GATHER_OP(float, uint32_t, gather_u32_f32)
GATHER_OP(double, uint32_t, gather_u32_f64)
GATHER_OP(uint8_t, uint32_t, gather_u32_u8)
GATHER_OP(int64_t, uint32_t, gather_u32_i64)
GATHER_OP(uint32_t, uint32_t, gather_u32_u32)
GATHER_OP(float, uint8_t, gather_u8_f32)
GATHER_OP(double, uint8_t, gather_u8_f64)
GATHER_OP(uint8_t, uint8_t, gather_u8_u8)
GATHER_OP(uint32_t, uint8_t, gather_u8_u32)
GATHER_OP(int64_t, uint8_t, gather_u8_i64)
IA_OP(float, int64_t, ia_i64_f32)
IA_OP(double, int64_t, ia_i64_f64)
IA_OP(uint8_t, int64_t, ia_i64_u8)
IA_OP(int64_t, int64_t, ia_i64_i64)
IA_OP(uint32_t, int64_t, ia_i64_u32)
IA_OP(float, uint32_t, ia_u32_f32)
IA_OP(double, uint32_t, ia_u32_f64)
IA_OP(uint8_t, uint32_t, ia_u32_u8)
IA_OP(int64_t, uint32_t, ia_u32_i64)
IA_OP(uint32_t, uint32_t, ia_u32_u32)
IA_OP(float, uint8_t, ia_u8_f32)
IA_OP(double, uint8_t, ia_u8_f64)
IA_OP(uint8_t, uint8_t, ia_u8_u8)
IA_OP(uint32_t, uint8_t, ia_u8_u32)
IA_OP(int64_t, uint8_t, ia_u8_i64)
SA_OP(float, int64_t, sa_i64_f32)
SA_OP(double, int64_t, sa_i64_f64)
SA_OP(uint8_t, int64_t, sa_i64_u8)
SA_OP(int64_t, int64_t, sa_i64_i64)
SA_OP(uint32_t, int64_t, sa_i64_u32)
SA_OP(float, uint32_t, sa_u32_f32)
SA_OP(double, uint32_t, sa_u32_f64)
SA_OP(uint8_t, uint32_t, sa_u32_u8)
SA_OP(int64_t, uint32_t, sa_u32_i64)
SA_OP(uint32_t, uint32_t, sa_u32_u32)
SA_OP(float, uint8_t, sa_u8_f32)
SA_OP(double, uint8_t, sa_u8_f64)
SA_OP(uint8_t, uint8_t, sa_u8_u8)
SA_OP(uint32_t, uint8_t, sa_u8_u32)
SA_OP(int64_t, uint8_t, sa_u8_i64)
| 5 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/affine.cu | #include "cuda_utils.cuh"
#include<stdint.h>
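// Elementwise affine transform: out = x * mul + add. When `inp` is null the transform is
// applied in place on `out`; `info` holds dims then strides, as in the other kernels.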
#define AFFINE_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const TYPENAME *inp, \
TYPENAME *out, \
const TYPENAME mul, \
const TYPENAME add \
) { \
const size_t *dims = info; \
const size_t *strides = info + num_dims; \
if (info == nullptr || is_contiguous(num_dims, dims, strides)) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = inp ? inp[i] : out[i]; \
out[i] = x * mul + add; \
} \
} \
else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \
TYPENAME x = inp ? inp[strided_i] : out[i]; \
out[i] = x * mul + add; \
} \
} \
} \
#if __CUDA_ARCH__ >= 800
AFFINE_OP(__nv_bfloat16, affine_bf16)
#endif
#if __CUDA_ARCH__ >= 530
AFFINE_OP(__half, affine_f16)
#endif
AFFINE_OP(float, affine_f32)
AFFINE_OP(double, affine_f64)
AFFINE_OP(uint8_t, affine_u8)
AFFINE_OP(uint32_t, affine_u32)
AFFINE_OP(int64_t, affine_i64)
| 6 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/conv.cu | #include "cuda_utils.cuh"
#include<stdint.h>
// Naive implementation of conv1d.
template <typename T, typename A>
__device__ void conv1d(
const size_t src_numel,
const size_t l_out,
const size_t stride,
const size_t padding,
const size_t dilation,
const size_t *info,
const T *src,
const T *kernel,
T *dst
) {
// src: (b_size, c_in, l_in)
// k: (c_out, c_in, k_size)
const size_t *src_dims = info;
const size_t *src_s = info + 3;
const size_t *k_dims = info + 6;
const size_t *k_s = info + 9;
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
const size_t k_size = k_dims[2];
const size_t c_out = k_dims[0];
const size_t c_in = src_dims[1];
const size_t l_in = src_dims[2];
if (dst_i >= src_dims[0] * c_out * l_out) {
return;
}
// TODO
const size_t b_idx = dst_i / (l_out * c_out);
const size_t dst_c_idx = (dst_i / l_out) % c_out;
const size_t dst_l = dst_i % l_out;
const size_t src_idx0 = b_idx * src_s[0];
A d = 0;
for (size_t offset = 0; offset < k_size; ++offset) {
        size_t src_l = stride * dst_l + offset * dilation;
if (src_l < padding || src_l >= padding + l_in) {
continue;
}
src_l -= padding;
for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_l * src_s[2];
const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + offset * k_s[2];
d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
}
}
dst[dst_i] = static_cast<T>(d);
}
template <typename T>
__device__ void im2col1d(
const size_t dst_numel,
const size_t l_out,
const size_t l_k,
const size_t stride,
const size_t padding,
const size_t dilation,
const size_t *info,
const T *src,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// dst: (b_size, l_out, c_in, l_k)
// src: (b_size, c_in, l_in)
if (dst_i >= dst_numel) {
return;
}
const size_t *src_dims = info;
const size_t *src_s = info + 3;
const size_t c_in = src_dims[1];
const size_t l_in = src_dims[2];
const size_t dst_s2 = l_k;
const size_t dst_s1 = c_in * dst_s2;
const size_t dst_s0 = l_out * dst_s1;
size_t tmp_dst_i = dst_i;
const size_t b_idx = tmp_dst_i / dst_s0;
tmp_dst_i -= b_idx * dst_s0;
const size_t l_idx = tmp_dst_i / dst_s1;
tmp_dst_i -= l_idx * dst_s1;
const size_t c_idx = tmp_dst_i / dst_s2;
tmp_dst_i -= c_idx * dst_s2;
const size_t l_k_idx = tmp_dst_i;
size_t src_l_idx = l_idx * stride + l_k_idx * dilation;
if (src_l_idx < padding || src_l_idx >= l_in + padding) {
dst[dst_i] = static_cast<T>(0);
}
else {
src_l_idx -= padding;
const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_l_idx * src_s[2];
dst[dst_i] = src[src_i];
}
}
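// col2im1d folds the (b_size, l_in, c_out, l_k) columns produced by im2col1d back into a
// (b_size, c_out, l_out) tensor, summing every (l_in_idx, k0) pair with
// l_out_idx == l_in_idx * stride + k0.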
template <typename T>
__device__ void col2im1d(
const size_t dst_el,
const size_t l_out,
const size_t l_in,
const size_t c_out,
const size_t k_size,
const size_t stride,
const T *src,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// src: (b_size, l_in, c_out, l_k)
// dst: (b_size, c_out, l_out)
if (dst_i >= dst_el) {
return;
}
const size_t dst_s0 = c_out * l_out;
const size_t dst_s1 = l_out;
const size_t src_s0 = c_out * k_size * l_in;
const size_t src_s1 = c_out * k_size;
const size_t src_s2 = k_size;
size_t tmp_dst_i = dst_i;
const size_t b_idx = tmp_dst_i / dst_s0;
tmp_dst_i -= b_idx * dst_s0;
const size_t c_idx = tmp_dst_i / dst_s1;
tmp_dst_i -= c_idx * dst_s1;
const int l_out_idx = tmp_dst_i;
dst[dst_i] = static_cast<T>(0);
int l_in_idx = l_out_idx / stride;
int k0 = l_out_idx - l_in_idx * stride;
// l_out_idx = l_in_idx * stride + k0
for (; k0 < k_size && l_in_idx >= 0; k0 += stride, --l_in_idx) {
if (l_in_idx < l_in) {
const size_t src_i = b_idx * src_s0 + l_in_idx * src_s1 + c_idx * src_s2 + k0;
dst[dst_i] += src[src_i];
}
}
}
template <typename T>
__device__ void im2col(
const size_t dst_numel,
const size_t h_out,
const size_t w_out,
const size_t h_k,
const size_t w_k,
const size_t stride,
const size_t padding,
const size_t dilation,
const size_t *info,
const T *src,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// dst: (b_size, h_out, w_out, c_in, h_k, w_k)
// src: (b_size, c_in, h_in, w_in)
if (dst_i >= dst_numel) {
return;
}
const size_t *src_dims = info;
const size_t *src_s = info + 4;
const size_t c_in = src_dims[1];
const size_t h_in = src_dims[2];
const size_t w_in = src_dims[3];
const size_t dst_s4 = w_k;
const size_t dst_s3 = h_k * dst_s4;
const size_t dst_s2 = c_in * dst_s3;
const size_t dst_s1 = w_out * dst_s2;
const size_t dst_s0 = h_out * dst_s1;
size_t tmp_dst_i = dst_i;
const size_t b_idx = tmp_dst_i / dst_s0;
tmp_dst_i -= b_idx * dst_s0;
const size_t h_idx = tmp_dst_i / dst_s1;
tmp_dst_i -= h_idx * dst_s1;
const size_t w_idx = tmp_dst_i / dst_s2;
tmp_dst_i -= w_idx * dst_s2;
const size_t c_idx = tmp_dst_i / dst_s3;
tmp_dst_i -= c_idx * dst_s3;
const size_t h_k_idx = tmp_dst_i / dst_s4;
tmp_dst_i -= h_k_idx * dst_s4;
const size_t w_k_idx = tmp_dst_i;
size_t src_h_idx = h_idx * stride + h_k_idx * dilation;
size_t src_w_idx = w_idx * stride + w_k_idx * dilation;
if (src_h_idx < padding || src_h_idx >= h_in + padding) {
dst[dst_i] = static_cast<T>(0);
}
else if (src_w_idx < padding || src_w_idx >= w_in + padding) {
dst[dst_i] = static_cast<T>(0);
}
else {
src_h_idx -= padding;
src_w_idx -= padding;
const size_t src_i =
b_idx * src_s[0]
+ c_idx * src_s[1]
+ src_h_idx * src_s[2]
+ src_w_idx * src_s[3];
dst[dst_i] = src[src_i];
}
}
// Naive implementation of conv2d.
template <typename T, typename A>
__device__ void conv2d(
const size_t src_numel,
const size_t w_out,
const size_t h_out,
const size_t stride,
const size_t padding,
const size_t dilation,
const size_t *info,
const T *src,
const T *kernel,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// src: (b_size, c_in, h_in, w_in)
// k: (c_out, c_in, h_k, w_k)
const size_t *src_dims = info;
const size_t *src_s = info + 4;
const size_t *k_dims = info + 8;
const size_t *k_s = info + 12;
const size_t h_k = k_dims[2];
const size_t w_k = k_dims[3];
const size_t c_out = k_dims[0];
const size_t c_in = src_dims[1];
const size_t h_in = src_dims[2];
const size_t w_in = src_dims[3];
if (dst_i >= src_dims[0] * c_out * w_out * h_out) {
return;
}
// TODO
const size_t b_idx = dst_i / (w_out * h_out * c_out);
const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out;
// NCHW layout.
const size_t dst_h = (dst_i / w_out) % h_out;
const size_t dst_w = dst_i % w_out;
const size_t src_idx0 = b_idx * src_s[0];
A d = 0;
for (size_t w_offset = 0; w_offset < w_k; ++w_offset) {
size_t src_w = stride * dst_w + w_offset * dilation;
if (src_w < padding || src_w >= w_in + padding) {
continue;
}
src_w -= padding;
for (size_t h_offset = 0; h_offset < h_k; ++h_offset) {
size_t src_h = stride * dst_h + h_offset * dilation;
if (src_h < padding || src_h >= h_in + padding) {
continue;
}
src_h -= padding;
for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + src_h * src_s[2] + src_w * src_s[3];
const size_t k_idx = dst_c_idx * k_s[0] + src_c_idx * k_s[1] + h_offset * k_s[2] + w_offset * k_s[3];
d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
}
}
}
dst[dst_i] = static_cast<T>(d);
}
// Naive implementation of conv_transpose1d.
template <typename T, typename A>
__device__ void conv_transpose1d(
const size_t src_numel,
const size_t l_out,
const size_t stride,
const size_t padding,
const size_t out_padding,
const size_t dilation,
const size_t *info,
const T *src,
const T *kernel,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// src: (b_size, c_in, l_in)
// k: (c_in, c_out, l_k)
const size_t *src_dims = info;
const size_t *src_s = info + 3;
const size_t *k_dims = info + 6;
const size_t *k_s = info + 9;
const size_t l_k = k_dims[2];
const size_t c_out = k_dims[1];
const size_t c_in = src_dims[1];
const size_t l_in = src_dims[2];
if (dst_i >= src_dims[0] * c_out * l_out) {
return;
}
// TODO
const size_t b_idx = dst_i / (l_out * c_out);
const size_t dst_c_idx = (dst_i / l_out) % c_out;
// NCL layout.
const size_t out_x = dst_i % l_out;
const size_t src_idx0 = b_idx * src_s[0];
A d = 0;
for (int k_x = 0; k_x < (int)l_k; ++k_x) {
// let out_x = inp_x * p.stride + k_x * p.dilation - p.padding;
int inp_x_stride = (int)(out_x + padding) - k_x * dilation;
if (inp_x_stride < 0 || inp_x_stride % stride) {
continue;
}
int inp_x = inp_x_stride / stride;
if (inp_x >= l_in) continue;
for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_x * src_s[2];
const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_x * k_s[2];
d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
}
}
dst[dst_i] = static_cast<T>(d);
}
// Naive implementation of conv_transpose2d.
template <typename T, typename A>
__device__ void conv_transpose2d(
const size_t src_numel,
const size_t w_out,
const size_t h_out,
const size_t stride,
const size_t padding,
const size_t out_padding,
const size_t dilation,
const size_t *info,
const T *src,
const T *kernel,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// src: (b_size, c_in, h_in, w_in)
// k: (c_in, c_out, h_k, w_k)
const size_t *src_dims = info;
const size_t *src_s = info + 4;
const size_t *k_dims = info + 8;
const size_t *k_s = info + 12;
const size_t h_k = k_dims[2];
const size_t w_k = k_dims[3];
const size_t c_out = k_dims[1];
const size_t c_in = src_dims[1];
const size_t h_in = src_dims[2];
const size_t w_in = src_dims[3];
if (dst_i >= src_dims[0] * c_out * w_out * h_out) {
return;
}
// TODO
const size_t b_idx = dst_i / (w_out * h_out * c_out);
const size_t dst_c_idx = (dst_i / (w_out * h_out)) % c_out;
// NCHW layout.
const size_t out_y = (dst_i / w_out) % h_out;
const size_t out_x = dst_i % w_out;
const size_t src_idx0 = b_idx * src_s[0];
A d = 0;
for (int k_x = 0; k_x < (int)w_k; ++k_x) {
// let out_x = inp_x * p.stride + k_x * p.dilation - p.padding;
int inp_x_stride = (int)(out_x + padding) - k_x * dilation;
if (inp_x_stride < 0 || inp_x_stride % stride) {
continue;
}
int inp_x = inp_x_stride / stride;
if (inp_x >= w_in) continue;
for (int k_y = 0; k_y < (int)h_k; ++k_y) {
int inp_y_stride = (int)(out_y + padding) - k_y * dilation;
if (inp_y_stride < 0 || inp_y_stride % stride) {
continue;
}
int inp_y = inp_y_stride / stride;
if (inp_y >= h_in) continue;
for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) {
const size_t src_idx = src_idx0 + src_c_idx * src_s[1] + inp_y * src_s[2] + inp_x * src_s[3];
const size_t k_idx = src_c_idx * k_s[0] + dst_c_idx * k_s[1] + k_y * k_s[2] + k_x * k_s[3];
d += static_cast<A>(src[src_idx]) * static_cast<A>(kernel[k_idx]);
}
}
}
dst[dst_i] = static_cast<T>(d);
}
template <typename T, typename A>
__device__ void avg_pool2d(
const size_t src_numel,
const size_t w_k,
const size_t h_k,
const size_t w_stride,
const size_t h_stride,
const size_t *info,
const T *src,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// src: (b_size, c_in, w_in, h_in)
const size_t *src_dims = info;
const size_t *src_s = info + 4;
const size_t c = src_dims[1];
const size_t w_in = src_dims[2];
const size_t h_in = src_dims[3];
const size_t w_out = (w_in - w_k) / w_stride + 1;
const size_t h_out = (h_in - h_k) / h_stride + 1;
if (dst_i >= src_dims[0] * c * w_out * h_out) {
return;
}
// TODO: Improve this.
const size_t b_idx = dst_i / (w_out * h_out * c);
const size_t c_idx = (dst_i / (w_out * h_out)) % c;
const size_t dst_w = (dst_i / h_out) % w_out;
const size_t dst_h = dst_i % h_out;
const size_t src_idx0 = b_idx * src_s[0];
const float scale = 1.0 / (w_k * h_k);
A d = 0;
for (size_t w_offset = 0; w_offset < w_k; ++w_offset) {
size_t src_w = w_stride * dst_w + w_offset;
if (src_w >= w_in) {
continue;
}
for (size_t h_offset = 0; h_offset < h_k; ++h_offset) {
size_t src_h = h_stride * dst_h + h_offset;
if (src_h >= h_in) {
continue;
}
const size_t src_idx = src_idx0 + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3];
d += static_cast<A>(src[src_idx]);
}
}
dst[dst_i] = static_cast<T>(d * scale);
}
template <typename T>
__device__ void max_pool2d(
const size_t src_numel,
const size_t w_k,
const size_t h_k,
const size_t w_stride,
const size_t h_stride,
const size_t *info,
const T *src,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// src: (b_size, c_in, w_in, h_in)
const size_t *src_dims = info;
const size_t *src_s = info + 4;
const size_t c = src_dims[1];
const size_t w_in = src_dims[2];
const size_t h_in = src_dims[3];
const size_t w_out = (w_in - w_k) / w_stride + 1;
const size_t h_out = (h_in - h_k) / h_stride + 1;
if (dst_i >= src_dims[0] * c * w_out * h_out) {
return;
}
// TODO: Improve this.
const size_t b_idx = dst_i / (w_out * h_out * c);
const size_t c_idx = (dst_i / (w_out * h_out)) % c;
const size_t dst_w = (dst_i / h_out) % w_out;
const size_t dst_h = dst_i % h_out;
const size_t src_idx0 = b_idx * src_s[0];
T d = 0;
bool set = false;
for (size_t w_offset = 0; w_offset < w_k; ++w_offset) {
size_t src_w = w_stride * dst_w + w_offset;
if (src_w >= w_in) {
continue;
}
for (size_t h_offset = 0; h_offset < h_k; ++h_offset) {
size_t src_h = h_stride * dst_h + h_offset;
if (src_h >= h_in) {
continue;
}
const size_t src_idx = src_idx0 + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3];
if (set) {
d = maxg(d, src[src_idx]);
}
else {
d = src[src_idx];
set = true;
}
}
}
dst[dst_i] = d;
}
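// Nearest-neighbour 2D upsampling: each output element copies the input element at the
// scaled-down coordinates, clamped to the input bounds.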
template <typename T>
__device__ void upsample_nearest2d(
const size_t w_out,
const size_t h_out,
const double w_scale,
const double h_scale,
const size_t *info,
const T *src,
T *dst
) {
const size_t dst_i = blockIdx.x * blockDim.x + threadIdx.x;
// src: (b_size, c_in, w_in, h_in)
const size_t *src_dims = info;
const size_t *src_s = info + 4;
const size_t c = src_dims[1];
const size_t w_in = src_dims[2];
const size_t h_in = src_dims[3];
if (dst_i >= src_dims[0] * c * w_out * h_out) {
return;
}
// TODO: Improve this.
const size_t b_idx = dst_i / (w_out * h_out * c);
const size_t c_idx = (dst_i / (w_out * h_out)) % c;
const size_t dst_w = (dst_i / h_out) % w_out;
const size_t dst_h = dst_i % h_out;
size_t src_w = static_cast<size_t>(dst_w * w_scale);
size_t src_h = static_cast<size_t>(dst_h * h_scale);
if (src_w >= w_in) {
src_w = w_in - 1;
}
if (src_h >= h_in) {
src_h = h_in - 1;
}
const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3];
dst[dst_i] = src[src_i];
}
#define CONV1D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t src_numel, \
    const size_t l_out, \
const size_t stride, \
const size_t padding, \
const size_t dilation, \
const size_t *info, \
const TYPENAME *src, \
const TYPENAME *kernel, \
TYPENAME *dst \
) { \
    conv1d<TYPENAME, TYPEACC>(src_numel, l_out, stride, padding, dilation, info, src, kernel, dst); \
} \
#define CONV2D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t src_numel, \
const size_t w_out, \
const size_t h_out, \
const size_t stride, \
const size_t padding, \
const size_t dilation, \
const size_t *info, \
const TYPENAME *src, \
const TYPENAME *kernel, \
TYPENAME *dst \
) { \
conv2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, dilation, info, src, kernel, dst); \
} \
#define IM2COL1D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t dst_numel, \
const size_t l_out, \
const size_t l_k, \
const size_t stride, \
const size_t padding, \
const size_t dilation, \
const size_t *info, \
const TYPENAME *src, \
TYPENAME *dst \
) { \
im2col1d<TYPENAME>(dst_numel, l_out, l_k, stride, padding, dilation, info, src, dst); \
} \
#define COL2IM1D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t dst_el, \
const size_t l_out, \
const size_t l_in, \
const size_t c_out, \
const size_t k_size, \
const size_t stride, \
const TYPENAME *src, \
TYPENAME *dst \
) { \
col2im1d<TYPENAME>(dst_el, l_out, l_in, c_out, k_size, stride, src, dst); \
} \
#define IM2COL_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t dst_numel, \
const size_t h_out, \
const size_t w_out, \
const size_t h_k, \
const size_t w_k, \
const size_t stride, \
const size_t padding, \
const size_t dilation, \
const size_t *info, \
const TYPENAME *src, \
TYPENAME *dst \
) { \
im2col<TYPENAME>(dst_numel, h_out, w_out, h_k, w_k, stride, padding, dilation, info, src, dst); \
} \
#define CONVT1D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t src_numel, \
const size_t l_out, \
const size_t stride, \
const size_t padding, \
const size_t out_padding, \
const size_t dilation, \
const size_t *info, \
const TYPENAME *src, \
const TYPENAME *kernel, \
TYPENAME *dst \
) { \
conv_transpose1d<TYPENAME, TYPEACC>(src_numel, l_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \
} \
#define CONVT2D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t src_numel, \
const size_t w_out, \
const size_t h_out, \
const size_t stride, \
const size_t padding, \
const size_t out_padding, \
const size_t dilation, \
const size_t *info, \
const TYPENAME *src, \
const TYPENAME *kernel, \
TYPENAME *dst \
) { \
conv_transpose2d<TYPENAME, TYPEACC>(src_numel, w_out, h_out, stride, padding, out_padding, dilation, info, src, kernel, dst); \
} \
#define AVG_POOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t src_numel, \
const size_t w_k, \
const size_t h_k, \
const size_t w_stride, \
const size_t h_stride, \
const size_t *info, \
const TYPENAME *src, \
TYPENAME *dst \
) { \
avg_pool2d<TYPENAME, TYPEACC>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \
} \
#define MAX_POOL2D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t src_numel, \
const size_t w_k, \
const size_t h_k, \
const size_t w_stride, \
const size_t h_stride, \
const size_t *info, \
const TYPENAME *src, \
TYPENAME *dst \
) { \
max_pool2d<TYPENAME>(src_numel, w_k, h_k, w_stride, h_stride, info, src, dst); \
} \
#define UPSAMPLE_NEAREST2D_OP(TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t w_out, \
const size_t h_out, \
const double w_scale, \
const double h_scale, \
const size_t *info, \
const TYPENAME *src, \
TYPENAME *dst \
) { \
upsample_nearest2d<TYPENAME>(w_out, h_out, w_scale, h_scale, info, src, dst); \
} \
#if __CUDA_ARCH__ >= 800
CONV1D_OP(__nv_bfloat16, float, conv1d_bf16)
CONV2D_OP(__nv_bfloat16, float, conv2d_bf16)
CONVT1D_OP(__nv_bfloat16, float, conv_transpose1d_bf16)
CONVT2D_OP(__nv_bfloat16, float, conv_transpose2d_bf16)
AVG_POOL2D_OP(__nv_bfloat16, float, avg_pool2d_bf16)
MAX_POOL2D_OP(__nv_bfloat16, max_pool2d_bf16)
UPSAMPLE_NEAREST2D_OP(__nv_bfloat16, upsample_nearest2d_bf16)
IM2COL_OP(__nv_bfloat16, im2col_bf16)
IM2COL1D_OP(__nv_bfloat16, im2col1d_bf16)
COL2IM1D_OP(__nv_bfloat16, col2im1d_bf16)
#endif
#if __CUDA_ARCH__ >= 530
CONV1D_OP(__half, float, conv1d_f16)
CONV2D_OP(__half, float, conv2d_f16)
CONVT1D_OP(__half, float, conv_transpose1d_f16)
CONVT2D_OP(__half, float, conv_transpose2d_f16)
AVG_POOL2D_OP(__half, float, avg_pool2d_f16)
MAX_POOL2D_OP(__half, max_pool2d_f16)
UPSAMPLE_NEAREST2D_OP(__half, upsample_nearest2d_f16)
IM2COL_OP(__half, im2col_f16)
IM2COL1D_OP(__half, im2col1d_f16)
COL2IM1D_OP(__half, col2im1d_f16)
#endif
CONV1D_OP(float, float, conv1d_f32)
CONV1D_OP(double, double, conv1d_f64)
CONV1D_OP(uint8_t, uint8_t, conv1d_u8)
CONV1D_OP(uint32_t, uint32_t, conv1d_u32)
CONV2D_OP(float, float, conv2d_f32)
CONV2D_OP(double, double, conv2d_f64)
CONV2D_OP(uint8_t, uint8_t, conv2d_u8)
CONV2D_OP(uint32_t, uint32_t, conv2d_u32)
CONVT1D_OP(float, float, conv_transpose1d_f32)
CONVT1D_OP(double, double, conv_transpose1d_f64)
CONVT1D_OP(uint8_t, uint8_t, conv_transpose1d_u8)
CONVT1D_OP(uint32_t, uint32_t, conv_transpose1d_u32)
CONVT2D_OP(float, float, conv_transpose2d_f32)
CONVT2D_OP(double, double, conv_transpose2d_f64)
CONVT2D_OP(uint8_t, uint8_t, conv_transpose2d_u8)
CONVT2D_OP(uint32_t, uint32_t, conv_transpose2d_u32)
AVG_POOL2D_OP(float, float, avg_pool2d_f32)
AVG_POOL2D_OP(double, double, avg_pool2d_f64)
AVG_POOL2D_OP(uint8_t, uint8_t, avg_pool2d_u8)
AVG_POOL2D_OP(uint32_t, uint32_t, avg_pool2d_u32)
MAX_POOL2D_OP(float, max_pool2d_f32)
MAX_POOL2D_OP(double, max_pool2d_f64)
MAX_POOL2D_OP(uint8_t, max_pool2d_u8)
MAX_POOL2D_OP(uint32_t, max_pool2d_u32)
UPSAMPLE_NEAREST2D_OP(float, upsample_nearest2d_f32)
UPSAMPLE_NEAREST2D_OP(double, upsample_nearest2d_f64)
UPSAMPLE_NEAREST2D_OP(uint8_t, upsample_nearest2d_u8)
UPSAMPLE_NEAREST2D_OP(uint32_t, upsample_nearest2d_u32)
IM2COL_OP(float, im2col_f32)
IM2COL_OP(double, im2col_f64)
IM2COL_OP(uint8_t, im2col_u8)
IM2COL_OP(uint32_t, im2col_u32)
IM2COL1D_OP(float, im2col1d_f32)
IM2COL1D_OP(double, im2col1d_f64)
IM2COL1D_OP(uint8_t, im2col1d_u8)
IM2COL1D_OP(uint32_t, im2col1d_u32)
COL2IM1D_OP(float, col2im1d_f32)
COL2IM1D_OP(double, col2im1d_f64)
COL2IM1D_OP(uint8_t, col2im1d_u8)
COL2IM1D_OP(uint32_t, col2im1d_u32)
| 7 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/binary.cu | #include "binary_op_macros.cuh"
#include<stdint.h>
#if __CUDA_ARCH__ >= 800
BINARY_OP(__nv_bfloat16, badd_bf16, x + y)
BINARY_OP(__nv_bfloat16, bdiv_bf16, x / y)
BINARY_OP(__nv_bfloat16, bmul_bf16, x * y)
BINARY_OP(__nv_bfloat16, bsub_bf16, x - y)
BINARY_OP(__nv_bfloat16, bmaximum_bf16, maxg(x, y))
BINARY_OP(__nv_bfloat16, bminimum_bf16, ming(x, y))
BINARY_OP_OUT(__nv_bfloat16, uint8_t, eq_bf16, x == y)
BINARY_OP_OUT(__nv_bfloat16, uint8_t, ne_bf16, x != y)
BINARY_OP_OUT(__nv_bfloat16, uint8_t, lt_bf16, x < y)
BINARY_OP_OUT(__nv_bfloat16, uint8_t, le_bf16, x <= y)
BINARY_OP_OUT(__nv_bfloat16, uint8_t, gt_bf16, x > y)
BINARY_OP_OUT(__nv_bfloat16, uint8_t, ge_bf16, x >= y)
#endif
#if __CUDA_ARCH__ >= 530
BINARY_OP(__half, badd_f16, x + y)
BINARY_OP(__half, bdiv_f16, x / y)
BINARY_OP(__half, bmul_f16, x * y)
BINARY_OP(__half, bsub_f16, x - y)
BINARY_OP(__half, bmaximum_f16, maxg(x, y))
BINARY_OP(__half, bminimum_f16, ming(x, y))
BINARY_OP_OUT(__half, uint8_t, eq_f16, x == y)
BINARY_OP_OUT(__half, uint8_t, ne_f16, x != y)
BINARY_OP_OUT(__half, uint8_t, lt_f16, x < y)
BINARY_OP_OUT(__half, uint8_t, le_f16, x <= y)
BINARY_OP_OUT(__half, uint8_t, gt_f16, x > y)
BINARY_OP_OUT(__half, uint8_t, ge_f16, x >= y)
#endif
BINARY_OP(float, badd_f32, x + y)
BINARY_OP(double, badd_f64, x + y);
BINARY_OP(uint8_t, badd_u8, x + y);
BINARY_OP(uint32_t, badd_u32, x + y);
BINARY_OP(int64_t, badd_i64, x + y);
BINARY_OP(float, bdiv_f32, x / y)
BINARY_OP(double, bdiv_f64, x / y);
BINARY_OP(uint8_t, bdiv_u8, x / y);
BINARY_OP(uint32_t, bdiv_u32, x / y);
BINARY_OP(int64_t, bdiv_i64, x / y);
BINARY_OP(float, bmul_f32, x * y)
BINARY_OP(double, bmul_f64, x * y);
BINARY_OP(uint8_t, bmul_u8, x * y);
BINARY_OP(uint32_t, bmul_u32, x * y);
BINARY_OP(int64_t, bmul_i64, x * y);
BINARY_OP(float, bsub_f32, x - y)
BINARY_OP(double, bsub_f64, x - y);
BINARY_OP(uint8_t, bsub_u8, x - y);
BINARY_OP(uint32_t, bsub_u32, x - y);
BINARY_OP(int64_t, bsub_i64, x - y);
BINARY_OP(float, bminimum_f32, ming(x, y));
BINARY_OP(double, bminimum_f64, ming(x, y));
BINARY_OP(uint8_t, bminimum_u8, ming(x, y));
BINARY_OP(uint32_t, bminimum_u32, ming(x, y));
BINARY_OP(int64_t, bminimum_i64, ming(x, y));
BINARY_OP(float, bmaximum_f32, maxg(x, y));
BINARY_OP(double, bmaximum_f64, maxg(x, y));
BINARY_OP(uint8_t, bmaximum_u8, maxg(x, y));
BINARY_OP(uint32_t, bmaximum_u32, maxg(x, y));
BINARY_OP(int64_t, bmaximum_i64, maxg(x, y));
BINARY_OP_OUT(float, uint8_t, eq_f32, x == y)
BINARY_OP_OUT(double, uint8_t, eq_f64, x == y)
BINARY_OP_OUT(uint8_t, uint8_t, eq_u8, x == y)
BINARY_OP_OUT(uint32_t, uint8_t, eq_u32, x == y)
BINARY_OP_OUT(int64_t, uint8_t, eq_i64, x == y)
BINARY_OP_OUT(float, uint8_t, ne_f32, x != y)
BINARY_OP_OUT(double, uint8_t, ne_f64, x != y)
BINARY_OP_OUT(uint8_t, uint8_t, ne_u8, x != y)
BINARY_OP_OUT(uint32_t, uint8_t, ne_u32, x != y)
BINARY_OP_OUT(int64_t, uint8_t, ne_i64, x != y)
BINARY_OP_OUT(float, uint8_t, lt_f32, x < y)
BINARY_OP_OUT(double, uint8_t, lt_f64, x < y)
BINARY_OP_OUT(uint8_t, uint8_t, lt_u8, x < y)
BINARY_OP_OUT(uint32_t, uint8_t, lt_u32, x < y)
BINARY_OP_OUT(int64_t, uint8_t, lt_i64, x < y)
BINARY_OP_OUT(float, uint8_t, le_f32, x <= y)
BINARY_OP_OUT(double, uint8_t, le_f64, x <= y)
BINARY_OP_OUT(uint8_t, uint8_t, le_u8, x <= y)
BINARY_OP_OUT(uint32_t, uint8_t, le_u32, x <= y)
BINARY_OP_OUT(int64_t, uint8_t, le_i64, x <= y)
BINARY_OP_OUT(float, uint8_t, gt_f32, x > y)
BINARY_OP_OUT(double, uint8_t, gt_f64, x > y)
BINARY_OP_OUT(uint8_t, uint8_t, gt_u8, x > y)
BINARY_OP_OUT(uint32_t, uint8_t, gt_u32, x > y)
BINARY_OP_OUT(int64_t, uint8_t, gt_i64, x > y)
BINARY_OP_OUT(float, uint8_t, ge_f32, x >= y)
BINARY_OP_OUT(double, uint8_t, ge_f64, x >= y)
BINARY_OP_OUT(uint8_t, uint8_t, ge_u8, x >= y)
BINARY_OP_OUT(uint32_t, uint8_t, ge_u32, x >= y)
BINARY_OP_OUT(int64_t, uint8_t, ge_i64, x >= y)
| 8 |
0 | hf_public_repos/candle/candle-kernels | hf_public_repos/candle/candle-kernels/src/binary_op_macros.cuh | #include "cuda_utils.cuh"
#define BINARY_OP_OUT(TYPENAME, OUT_TYPENAME, FN_NAME, FUNC) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *dims_and_strides, \
const TYPENAME *lhs, \
const TYPENAME *rhs, \
OUT_TYPENAME *out \
) { \
const size_t *dims = dims_and_strides; \
const size_t *lhs_strides = dims_and_strides + 1 * num_dims; \
const size_t *rhs_strides = dims_and_strides + 2 * num_dims; \
bool lhs_cont = dims_and_strides == nullptr || is_contiguous(num_dims, dims, lhs_strides); \
bool rhs_cont = dims_and_strides == nullptr || is_contiguous(num_dims, dims, rhs_strides); \
if (lhs_cont && rhs_cont) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
TYPENAME x = lhs[i]; \
TYPENAME y = rhs[i]; \
out[i] = FUNC; \
} \
} else if (lhs_cont) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned int tmp_i = i; \
unsigned int rhs_i = 0; \
for (int d = num_dims - 1; d >= 0; d--) { \
unsigned int i_dim = tmp_i % dims[d]; \
rhs_i += i_dim * rhs_strides[d]; \
tmp_i /= dims[d]; \
} \
TYPENAME x = lhs[i]; \
TYPENAME y = rhs[rhs_i]; \
out[i] = FUNC; \
} \
} else if (rhs_cont) { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned int tmp_i = i; \
unsigned int lhs_i = 0; \
for (int d = num_dims - 1; d >= 0; d--) { \
unsigned int i_dim = tmp_i % dims[d]; \
lhs_i += i_dim * lhs_strides[d]; \
tmp_i /= dims[d]; \
} \
TYPENAME x = lhs[lhs_i]; \
TYPENAME y = rhs[i]; \
out[i] = FUNC; \
} \
} else { \
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \
unsigned int tmp_i = i; \
unsigned int lhs_i = 0; \
unsigned int rhs_i = 0; \
for (int d = num_dims - 1; d >= 0; d--) { \
unsigned int i_dim = tmp_i % dims[d]; \
lhs_i += i_dim * lhs_strides[d]; \
rhs_i += i_dim * rhs_strides[d]; \
tmp_i /= dims[d]; \
} \
TYPENAME x = lhs[lhs_i]; \
TYPENAME y = rhs[rhs_i]; \
out[i] = FUNC; \
} \
} \
} \
#define BINARY_OP(TYPENAME, FN_NAME, FUNC) \
BINARY_OP_OUT(TYPENAME, TYPENAME, FN_NAME, FUNC)
| 9 |