repo_id (string, length 15–86) | file_path (string, length 28–180) | content (string, length 1–1.75M) | __index_level_0__ (int64, always 0) |
---|---|---|---|
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/stores/pendingMessage.ts | import { writable } from "svelte/store";
export const pendingMessage = writable<string>("");
| 0 |
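A minimal usage sketch for the store above. The consumer code is hypothetical (not part of the repo): it queues a message before navigation, then reads and clears it once on the destination page.

// Hypothetical consumer of pendingMessage (illustrative only).
import { get } from "svelte/store";
import { pendingMessage } from "$lib/stores/pendingMessage";

pendingMessage.set("Hello!"); // queue a message before navigating

const msg = get(pendingMessage); // read it once on the next page...
if (msg) {
  pendingMessage.set(""); // ...and clear it so it is not replayed
  console.log("sending pending message:", msg);
}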
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/stores/errors.ts | import { writable } from "svelte/store";
export const ERROR_MESSAGES = {
default: "Oops, something went wrong.",
authOnly: "You have to be logged in.",
rateLimited: "You are sending too many messages. Try again later.",
};
export const error = writable<string | null>(null);
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/constants/publicSepToken.ts | export const PUBLIC_SEP_TOKEN = "</s>";
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/StopGeneratingBtn.svelte | <script lang="ts">
import CarbonStopFilledAlt from "~icons/carbon/stop-filled-alt";
export let classNames = "";
</script>
<button
type="button"
on:click
class="btn flex h-9 rounded-lg border bg-white px-3 py-1 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-600 {classNames}"
>
<CarbonStopFilledAlt class="-ml-1 mr-1 h-[1.25rem] w-[1.1875rem] text-gray-400" /> Stop generating
</button>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/NavMenu.svelte | <script lang="ts">
import { base } from "$app/paths";
import { createEventDispatcher } from "svelte";
import Logo from "$lib/components/icons/Logo.svelte";
import { switchTheme } from "$lib/switchTheme";
import { PUBLIC_APP_NAME, PUBLIC_ORIGIN } from "$env/static/public";
import NavConversationItem from "./NavConversationItem.svelte";
import type { LayoutData } from "../../routes/$types";
const dispatch = createEventDispatcher<{
shareConversation: { id: string; title: string };
clickSettings: void;
clickLogout: void;
}>();
export let conversations: Array<{
id: string;
title: string;
}> = [];
export let user: LayoutData["user"];
</script>
<div class="sticky top-0 flex flex-none items-center justify-between px-3 py-3.5 max-sm:pt-0">
<a class="flex items-center rounded-xl text-lg font-semibold" href="{PUBLIC_ORIGIN}{base}/">
<Logo classNames="mr-1" />
{PUBLIC_APP_NAME}
</a>
<a
href={`${base}/`}
class="flex rounded-lg border bg-white px-2 py-0.5 text-center shadow-sm hover:shadow-none dark:border-gray-600 dark:bg-gray-700"
>
New Chat
</a>
</div>
<div
class="scrollbar-custom flex flex-col gap-1 overflow-y-auto rounded-r-xl bg-gradient-to-l from-gray-50 px-3 pb-3 pt-2 dark:from-gray-800/30"
>
{#each conversations as conv (conv.id)}
<NavConversationItem on:editConversationTitle on:deleteConversation {conv} />
{/each}
</div>
<div
class="mt-0.5 flex flex-col gap-1 rounded-r-xl bg-gradient-to-l from-gray-50 p-3 text-sm dark:from-gray-800/30"
>
{#if user?.username || user?.email}
<form
action="{base}/logout"
method="post"
class="group flex items-center gap-1.5 rounded-lg pl-3 pr-2 hover:bg-gray-100 dark:hover:bg-gray-700"
>
<span
class="flex h-9 flex-none shrink items-center gap-1.5 truncate pr-2 text-gray-500 dark:text-gray-400"
>{user?.username || user?.email}</span
>
<button
type="submit"
class="ml-auto h-6 flex-none items-center gap-1.5 rounded-md border bg-white px-2 text-gray-700 shadow-sm group-hover:flex hover:shadow-none dark:border-gray-600 dark:bg-gray-600 dark:text-gray-400 dark:hover:text-gray-300 md:hidden"
>
Sign Out
</button>
</form>
{/if}
<button
on:click={switchTheme}
type="button"
class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-3 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700"
>
Theme
</button>
<button
on:click={() => dispatch("clickSettings")}
type="button"
class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-3 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700"
>
Settings
</button>
{#if PUBLIC_APP_NAME === "HuggingChat"}
<a
href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions"
target="_blank"
rel="noreferrer"
class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-3 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700"
>
Feedback
</a>
<a
href="{base}/privacy"
class="flex h-9 flex-none items-center gap-1.5 rounded-lg pl-3 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700"
>
About & Privacy
</a>
{/if}
</div>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/AnnouncementBanner.svelte | <script lang="ts">
export let title = "";
export let classNames = "";
</script>
<div class="flex items-center rounded-xl bg-gray-100 p-1 text-sm dark:bg-gray-800 {classNames}">
<span
class="mr-2 inline-flex items-center rounded-lg bg-gradient-to-br from-primary-300 px-2 py-1 text-xxs font-medium uppercase leading-3 text-primary-700 dark:from-primary-900 dark:text-primary-400"
>New</span
>
{title}
<div class="ml-auto shrink-0">
<slot />
</div>
</div>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/SettingsModal.svelte | <script lang="ts">
import { createEventDispatcher } from "svelte";
import Modal from "$lib/components/Modal.svelte";
import CarbonClose from "~icons/carbon/close";
import Switch from "$lib/components/Switch.svelte";
import type { Settings } from "$lib/types/Settings";
import { enhance } from "$app/forms";
import { base } from "$app/paths";
import { PUBLIC_APP_DATA_SHARING } from "$env/static/public";
export let settings: Pick<Settings, "shareConversationsWithModelAuthors">;
let shareConversationsWithModelAuthors = settings.shareConversationsWithModelAuthors;
let isConfirmingDeletion = false;
const dispatch = createEventDispatcher<{ close: void }>();
</script>
<Modal on:close>
<div class="flex w-full flex-col gap-5 p-6">
<div class="flex items-start justify-between text-xl font-semibold text-gray-800">
<h2>Settings</h2>
<button type="button" class="group" on:click={() => dispatch("close")}>
<CarbonClose class="text-gray-900 group-hover:text-gray-500" />
</button>
</div>
<form
class="flex flex-col gap-5"
use:enhance={() => {
dispatch("close");
}}
method="post"
action="{base}/settings"
>
{#if PUBLIC_APP_DATA_SHARING}
<label class="flex cursor-pointer select-none items-center gap-2 text-gray-500">
{#each Object.entries(settings).filter(([k]) => k !== "shareConversationsWithModelAuthors") as [key, val]}
<input type="hidden" name={key} value={val} />
{/each}
<Switch
name="shareConversationsWithModelAuthors"
bind:checked={shareConversationsWithModelAuthors}
/>
Share conversations with model authors
</label>
<p class="text-gray-800">
Sharing your data will help improve the training data and make open models better over
time.
</p>
<p class="text-gray-800">
You can change this setting at any time, it applies to all your conversations.
</p>
<p class="text-gray-800">
Read more about this model's authors,
<a
href="https://open-assistant.io/"
target="_blank"
rel="noreferrer"
class="underline decoration-gray-300 hover:decoration-gray-700">Open Assistant</a
>.
</p>
{/if}
<form
method="post"
action="{base}/conversations?/delete"
on:submit|preventDefault={() => (isConfirmingDeletion = true)}
>
<button type="submit" class="underline decoration-gray-300 hover:decoration-gray-700">
Delete all conversations
</button>
</form>
<button
type="submit"
class="mt-2 rounded-full bg-black px-5 py-2 text-lg font-semibold text-gray-100 ring-gray-400 ring-offset-1 transition-all focus-visible:outline-none focus-visible:ring hover:ring"
>
Apply
</button>
</form>
{#if isConfirmingDeletion}
<Modal on:close={() => (isConfirmingDeletion = false)}>
<form
use:enhance={() => {
dispatch("close");
}}
method="post"
action="{base}/conversations?/delete"
class="flex w-full flex-col gap-5 p-6"
>
<div class="flex items-start justify-between text-xl font-semibold text-gray-800">
<h2>Are you sure?</h2>
<button type="button" class="group" on:click={() => (isConfirmingDeletion = false)}>
<CarbonClose class="text-gray-900 group-hover:text-gray-500" />
</button>
</div>
<p class="text-gray-800">
This action will delete all your conversations. This cannot be undone.
</p>
<button
type="submit"
class="mt-2 rounded-full bg-red-700 px-5 py-2 text-lg font-semibold text-gray-100 ring-gray-400 ring-offset-1 transition-all focus-visible:outline-none focus-visible:ring hover:ring"
>
Confirm deletion
</button>
</form>
</Modal>
{/if}
</div>
</Modal>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/WebSearchToggle.svelte | <script lang="ts">
import { webSearchParameters } from "$lib/stores/webSearchParameters";
import CarbonInformation from "~icons/carbon/information";
import Switch from "./Switch.svelte";
const toggle = () => ($webSearchParameters.useSearch = !$webSearchParameters.useSearch);
</script>
<div
class="flex h-9 cursor-pointer select-none items-center gap-2 rounded-xl border bg-white p-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900"
on:click={toggle}
on:keypress={toggle}
>
<Switch name="useSearch" bind:checked={$webSearchParameters.useSearch} on:click on:keypress />
<div class="whitespace-nowrap text-sm text-gray-800 dark:text-gray-200">Search web</div>
<div class="group relative w-max">
<CarbonInformation class="text-xs text-gray-500" />
<div
class="pointer-events-none absolute -top-20 left-1/2 w-max -translate-x-1/2 rounded-md bg-gray-100 p-2 opacity-0 transition-opacity group-hover:opacity-100 dark:bg-gray-800"
>
<p class="max-w-sm text-sm text-gray-800 dark:text-gray-200">
When enabled, the model will try to complement its answer with information queried from the
web.
</p>
</div>
</div>
</div>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/LoginModal.svelte | <script lang="ts">
import { browser } from "$app/environment";
import { base } from "$app/paths";
import { page } from "$app/stores";
import { PUBLIC_APP_DATA_SHARING, PUBLIC_APP_NAME, PUBLIC_VERSION } from "$env/static/public";
import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte";
import Modal from "$lib/components/Modal.svelte";
import type { LayoutData } from "../../routes/$types";
import Logo from "./icons/Logo.svelte";
export let settings: LayoutData["settings"];
const isIframe = browser && window.self !== window.parent;
</script>
<Modal>
<div
class="flex w-full flex-col items-center gap-6 bg-gradient-to-t from-primary-500/40 via-primary-500/10 to-primary-500/0 px-4 pb-10 pt-9 text-center "
>
<h2 class="flex items-center text-2xl font-semibold text-gray-800">
<Logo classNames="mr-1" />
{PUBLIC_APP_NAME}
<div
class="ml-3 flex h-6 items-center rounded-lg border border-gray-100 bg-gray-50 px-2 text-base text-gray-400"
>
v{PUBLIC_VERSION}
</div>
</h2>
{#if $page.data.requiresLogin}
<p
class="px-4 text-lg font-semibold leading-snug text-gray-800 sm:px-12"
style="text-wrap: balance;"
>
Please Sign in with Hugging Face to continue
</p>
{/if}
<p class="text-base text-gray-800">
Disclaimer: AI is an area of active research with known problems such as biased generation and
misinformation. Do not use this application for high-stakes decisions or advice.
</p>
{#if PUBLIC_APP_DATA_SHARING}
<p class="px-2 text-sm text-gray-500">
Your conversations will be shared with model authors unless you disable it from your
settings.
</p>
{/if}
<form
action="{base}/{$page.data.requiresLogin ? 'login' : 'settings'}"
target={isIframe ? "_blank" : ""}
method="POST"
class="flex w-full flex-col items-center gap-2"
>
{#if $page.data.requiresLogin}
<button
type="submit"
class="mt-2 flex items-center whitespace-nowrap rounded-full bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-primary-500"
>
Sign in
{#if PUBLIC_APP_NAME === "HuggingChat"}
with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5" /> Hugging Face
{/if}
</button>
{:else}
<input type="hidden" name="ethicsModalAccepted" value={true} />
{#each Object.entries(settings) as [key, val]}
<input type="hidden" name={key} value={val} />
{/each}
<button
type="submit"
class="mt-2 rounded-full bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-primary-500"
>
Start chatting
</button>
{/if}
</form>
</div>
</Modal>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/NavConversationItem.svelte | <script lang="ts">
import { base } from "$app/paths";
import { page } from "$app/stores";
import { createEventDispatcher } from "svelte";
import CarbonCheckmark from "~icons/carbon/checkmark";
import CarbonTrashCan from "~icons/carbon/trash-can";
import CarbonClose from "~icons/carbon/close";
import CarbonEdit from "~icons/carbon/edit";
export let conv: { id: string; title: string };
let confirmDelete = false;
const dispatch = createEventDispatcher<{
deleteConversation: string;
editConversationTitle: { id: string; title: string };
}>();
</script>
<a
data-sveltekit-noscroll
on:mouseleave={() => {
confirmDelete = false;
}}
href="{base}/conversation/{conv.id}"
class="group flex h-11 flex-none items-center gap-1.5 rounded-lg pl-3 pr-2 text-gray-500 hover:bg-gray-100 dark:text-gray-400 dark:hover:bg-gray-700 {conv.id ===
$page.params.id
? 'bg-gray-100 dark:bg-gray-700'
: ''}"
>
<div class="flex-1 truncate">
{#if confirmDelete}
<span class="font-semibold"> Delete </span>
{/if}
{conv.title}
</div>
{#if confirmDelete}
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Confirm delete action"
on:click|preventDefault={() => dispatch("deleteConversation", conv.id)}
>
<CarbonCheckmark class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Cancel delete action"
on:click|preventDefault={() => {
confirmDelete = false;
}}
>
<CarbonClose class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
{:else}
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Edit conversation title"
on:click|preventDefault={() => {
const newTitle = prompt("Edit this conversation title:", conv.title);
if (!newTitle) return;
dispatch("editConversationTitle", { id: conv.id, title: newTitle });
}}
>
<CarbonEdit class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Delete conversation"
on:click|preventDefault={(event) => {
if (event.shiftKey) {
dispatch("deleteConversation", conv.id);
} else {
confirmDelete = true;
}
}}
>
<CarbonTrashCan class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
{/if}
</a>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/ModelsModal.svelte | <script lang="ts">
import { createEventDispatcher } from "svelte";
import Modal from "$lib/components/Modal.svelte";
import CarbonClose from "~icons/carbon/close";
import CarbonCheckmark from "~icons/carbon/checkmark-filled";
import ModelCardMetadata from "./ModelCardMetadata.svelte";
import type { Model } from "$lib/types/Model";
import type { LayoutData } from "../../routes/$types";
import { enhance } from "$app/forms";
import { base } from "$app/paths";
export let settings: LayoutData["settings"];
export let models: Array<Model>;
let selectedModelId = settings.activeModel;
const dispatch = createEventDispatcher<{ close: void }>();
</script>
<Modal width="max-w-lg" on:close>
<form
action="{base}/settings"
method="post"
use:enhance={() => {
dispatch("close");
}}
class="flex w-full flex-col gap-5 p-6"
>
{#each Object.entries(settings).filter(([k]) => k !== "activeModel") as [key, val]}
<input type="hidden" name={key} value={val} />
{/each}
<div class="flex items-start justify-between text-xl font-semibold text-gray-800">
<h2>Models</h2>
<button type="button" class="group" on:click={() => dispatch("close")}>
<CarbonClose class="text-gray-900 group-hover:text-gray-500" />
</button>
</div>
<div class="space-y-4">
{#each models as model}
<div
class="rounded-xl border border-gray-100 {model.id === selectedModelId
? 'bg-gradient-to-r from-primary-200/40 via-primary-500/10'
: ''}"
>
<label class="group flex cursor-pointer p-3" on:change aria-label={model.displayName}>
<input
type="radio"
class="sr-only"
name="activeModel"
value={model.id}
bind:group={selectedModelId}
/>
<span>
<span class="text-md block font-semibold leading-tight text-gray-800"
>{model.displayName}</span
>
{#if model.description}
<span class="text-xs text-[#9FA8B5]">{model.description}</span>
{/if}
</span>
<CarbonCheckmark
class="-mr-1 -mt-1 ml-auto shrink-0 text-xl {model.id === selectedModelId
? 'text-primary-400'
: 'text-transparent group-hover:text-gray-200'}"
/>
</label>
<ModelCardMetadata {model} />
</div>
{/each}
</div>
<button
type="submit"
class="mt-2 rounded-full bg-black px-5 py-2 text-lg font-semibold text-gray-100 ring-gray-400 ring-offset-1 transition-colors hover:ring"
>
Apply
</button>
</form>
</Modal>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/ModelCardMetadata.svelte | <script lang="ts">
import CarbonEarth from "~icons/carbon/earth";
import CarbonArrowUpRight from "~icons/carbon/arrow-up-right";
import type { Model } from "$lib/types/Model";
export let model: Pick<Model, "name" | "datasetName" | "websiteUrl" | "modelUrl" | "datasetUrl">;
export let variant: "light" | "dark" = "light";
</script>
<div
class="flex items-center gap-5 rounded-xl bg-gray-100 px-3 py-2 text-sm
{variant === 'dark'
? 'text-gray-600 dark:bg-gray-800 dark:text-gray-300'
: 'text-gray-800 dark:bg-gray-100 dark:text-gray-600'}"
>
<a
href={model.modelUrl || "https://huggingface.co/" + model.name}
target="_blank"
rel="noreferrer"
class="flex items-center hover:underline"
><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" />
Model
<div class="max-sm:hidden"> page</div></a
>
{#if model.datasetName || model.datasetUrl}
<a
href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName}
target="_blank"
rel="noreferrer"
class="flex items-center hover:underline"
><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" />
Dataset
<div class="max-sm:hidden"> page</div></a
>
{/if}
{#if model.websiteUrl}
<a
href={model.websiteUrl}
target="_blank"
class="ml-auto flex items-center hover:underline"
rel="noreferrer"
>
<CarbonEarth class="mr-1.5 shrink-0 text-xs text-gray-400" />
Website
</a>
{/if}
</div>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/Tooltip.svelte | <script lang="ts">
export let classNames = "";
export let label = "Copied";
export let position = "left-1/2 top-full transform -translate-x-1/2 translate-y-2";
</script>
<div
class="
pointer-events-none absolute rounded bg-black px-2 py-1 font-normal leading-tight text-white shadow transition-opacity
{position}
{classNames}
"
>
<div
class="absolute bottom-full left-1/2 h-0 w-0 -translate-x-1/2 transform border-4 border-t-0 border-black"
style="
border-left-color: transparent;
border-right-color: transparent;
"
/>
{label}
</div>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/Portal.svelte | <script lang="ts">
import { onMount, onDestroy } from "svelte";
let el: HTMLElement;
onMount(() => {
el.ownerDocument.body.appendChild(el);
});
onDestroy(() => {
if (el?.parentNode) {
el.parentNode.removeChild(el);
}
});
</script>
<div bind:this={el} class="contents" hidden>
<slot />
</div>
| 0 |
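Portal.svelte above re-parents its children to document.body on mount so overlays escape any overflow-hidden or transformed ancestor. A framework-free sketch of the same idea; portalToBody is a made-up helper, not part of the repo:

// Minimal portal sketch in plain TypeScript.
function portalToBody(el: HTMLElement): () => void {
  document.body.appendChild(el); // move out of any clipping ancestor
  return () => {
    el.parentNode?.removeChild(el); // teardown mirrors onDestroy above
  };
}

const overlay = document.createElement("div");
overlay.textContent = "rendered at the end of <body>";
const release = portalToBody(overlay);
// ...later, when the overlay closes:
release();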
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/Toast.svelte | <script lang="ts">
import { fade } from "svelte/transition";
import IconDazzled from "$lib/components/icons/IconDazzled.svelte";
export let message = "";
</script>
<div
transition:fade={{ duration: 300 }}
class="pointer-events-none fixed right-0 top-12 z-20 bg-gradient-to-bl from-red-500/20 via-red-500/0 to-red-500/0 pb-36 pl-36 pr-2 pt-2 md:top-0 md:pr-8 md:pt-5"
>
<div
class="pointer-events-auto flex items-center rounded-full bg-white/90 px-3 py-1 shadow-sm dark:bg-gray-900/80"
>
<IconDazzled classNames="text-2xl mr-2" />
<h2 class="font-semibold">{message}</h2>
</div>
</div>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/CopyToClipBoardBtn.svelte | <script lang="ts">
import { onDestroy } from "svelte";
import IconCopy from "./icons/IconCopy.svelte";
import Tooltip from "./Tooltip.svelte";
export let classNames = "";
export let value: string;
let isSuccess = false;
let timeout: ReturnType<typeof setTimeout>;
const handleClick = async () => {
// writeText() can be unavailable or fail in some cases (iframe, etc) so we try/catch
try {
await navigator.clipboard.writeText(value);
isSuccess = true;
if (timeout) {
clearTimeout(timeout);
}
timeout = setTimeout(() => {
isSuccess = false;
}, 1000);
} catch (err) {
console.error(err);
}
};
onDestroy(() => {
if (timeout) {
clearTimeout(timeout);
}
});
</script>
<button
class="btn rounded-lg border border-gray-200 px-2 py-2 text-sm shadow-sm transition-all hover:border-gray-300 active:shadow-inner dark:border-gray-600 dark:hover:border-gray-400 {classNames}
{!isSuccess && 'text-gray-200 dark:text-gray-200'}
{isSuccess && 'text-green-500'}
"
title={"Copy to clipboard"}
type="button"
on:click={handleClick}
>
<span class="relative">
<IconCopy />
<Tooltip classNames={isSuccess ? "opacity-100" : "opacity-0"} />
</span>
</button>
| 0 |
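navigator.clipboard.writeText() requires a secure context and can be blocked inside iframes, hence the try/catch above. If a fallback were wanted, one option is the older, deprecated document.execCommand path; a hedged sketch follows (copyText is a made-up helper, not part of the repo):

async function copyText(value: string): Promise<boolean> {
  try {
    await navigator.clipboard.writeText(value);
    return true;
  } catch {
    // Deprecated fallback: copy from an off-screen textarea.
    const ta = document.createElement("textarea");
    ta.value = value;
    ta.style.position = "fixed"; // keep it from scrolling the page
    document.body.appendChild(ta);
    ta.select();
    const ok = document.execCommand("copy");
    ta.remove();
    return ok;
  }
}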
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/OpenWebSearchResults.svelte | <script lang="ts">
import type { WebSearchMessage } from "$lib/types/WebSearch";
import CarbonCaretRight from "~icons/carbon/caret-right";
import CarbonCheckmark from "~icons/carbon/checkmark-filled";
import CarbonError from "~icons/carbon/error-filled";
import EosIconsLoading from "~icons/eos-icons/loading";
export let loading = false;
export let classNames = "";
export let webSearchMessages: WebSearchMessage[] = [];
let detailsOpen: boolean;
let error: boolean;
$: error = webSearchMessages[webSearchMessages.length - 2]?.type === "error";
</script>
<details
class="flex w-fit rounded-xl border border-gray-200 bg-white shadow-sm dark:border-gray-800 dark:bg-gray-900 {classNames} max-w-full"
bind:open={detailsOpen}
>
<summary
class="align-center flex cursor-pointer select-none list-none py-1 pl-2.5 pr-2 align-text-top transition-all"
>
{#if error}
<CarbonError class="my-auto text-red-700 dark:text-red-500" />
{:else if loading}
<EosIconsLoading class="my-auto text-gray-500" />
{:else}
<CarbonCheckmark class="my-auto text-gray-500" />
{/if}
<span class="px-2 font-medium" class:text-red-700={error} class:dark:text-red-500={error}
>Web search
</span>
<div class="my-auto transition-all" class:rotate-90={detailsOpen}>
<CarbonCaretRight />
</div>
</summary>
<div class="content px-5 pb-5 pt-4">
{#if webSearchMessages.length === 0}
<div class="mx-auto w-fit">
<EosIconsLoading class="mb-3 h-4 w-4" />
</div>
{:else}
<ol>
{#each webSearchMessages as message}
{#if message.type === "update"}
<li class="group border-l pb-6 last:!border-transparent last:pb-0 dark:border-gray-800">
<div class="flex items-start">
<div
class="-ml-1.5 h-3 w-3 flex-none rounded-full bg-gray-200 dark:bg-gray-600 {loading
? 'group-last:animate-pulse group-last:bg-gray-300 group-last:dark:bg-gray-500'
: ''}"
/>
<h3 class="text-md -mt-1.5 pl-2.5 text-gray-800 dark:text-gray-100">
{message.message}
</h3>
</div>
{#if message.args}
<p class="mt-1.5 pl-4 text-gray-500 dark:text-gray-400">
{message.args}
</p>
{/if}
</li>
{:else if message.type === "error"}
<li class="group border-l pb-6 last:!border-transparent last:pb-0 dark:border-gray-800">
<div class="flex items-start">
<CarbonError
class="-ml-1.5 h-3 w-3 flex-none scale-110 text-red-700 dark:text-red-500"
/>
<h3 class="text-md -mt-1.5 pl-2.5 text-red-700 dark:text-red-500">
{message.message}
</h3>
</div>
{#if message.args}
<p class="mt-1.5 pl-4 text-gray-500 dark:text-gray-400">
{message.args}
</p>
{/if}
</li>
{/if}
{/each}
</ol>
{/if}
</div>
</details>
<style>
@keyframes grow {
0% {
font-size: 0;
opacity: 0;
}
30% {
font-size: 1em;
opacity: 0;
}
100% {
opacity: 1;
}
}
details[open] .content {
animation-name: grow;
animation-duration: 300ms;
animation-delay: 0ms;
}
details summary::-webkit-details-marker {
display: none;
}
</style>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/Modal.svelte | <script lang="ts">
import { createEventDispatcher, onDestroy, onMount } from "svelte";
import { cubicOut } from "svelte/easing";
import { fade } from "svelte/transition";
import Portal from "./Portal.svelte";
import { browser } from "$app/environment";
export let width = "max-w-sm";
let backdropEl: HTMLDivElement;
let modalEl: HTMLDivElement;
const dispatch = createEventDispatcher<{ close: void }>();
function handleKeydown(event: KeyboardEvent) {
// close on ESC
if (event.key === "Escape") {
event.preventDefault();
dispatch("close");
}
}
function handleBackdropClick(event: MouseEvent) {
if (event.target === backdropEl) {
dispatch("close");
}
}
onMount(() => {
document.getElementById("app")?.setAttribute("inert", "true");
modalEl.focus();
});
onDestroy(() => {
if (!browser) return;
// remove inert attribute if this is the last modal
if (document.querySelectorAll('[role="dialog"]:not(#app *)').length === 1) {
document.getElementById("app")?.removeAttribute("inert");
}
});
</script>
<Portal>
<div
role="presentation"
tabindex="-1"
bind:this={backdropEl}
on:click={handleBackdropClick}
transition:fade={{ easing: cubicOut, duration: 300 }}
class="fixed inset-0 z-40 flex items-center justify-center bg-black/80 p-8 backdrop-blur-sm dark:bg-black/50"
>
<div
role="dialog"
tabindex="-1"
bind:this={modalEl}
on:keydown={handleKeydown}
class="-mt-10 overflow-hidden rounded-2xl bg-white shadow-2xl outline-none md:-mt-20 {width}"
>
<slot />
</div>
</div>
</Portal>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/CodeBlock.svelte | <script lang="ts">
import { afterUpdate } from "svelte";
import CopyToClipBoardBtn from "./CopyToClipBoardBtn.svelte";
export let code = "";
export let lang = "";
$: highlightedCode = "";
afterUpdate(async () => {
const { default: hljs } = await import("highlight.js");
const language = hljs.getLanguage(lang);
highlightedCode = hljs.highlightAuto(code, language?.aliases).value;
});
</script>
<div class="group relative my-4 rounded-lg">
<!-- eslint-disable svelte/no-at-html-tags -->
<pre
class="scrollbar-custom overflow-auto px-5 scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20"><code
class="language-{lang}">{@html highlightedCode || code.replaceAll("<", "<")}</code
></pre>
<CopyToClipBoardBtn
classNames="absolute top-2 right-2 invisible opacity-0 group-hover:visible group-hover:opacity-100"
value={code}
/>
</div>
| 0 |
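highlightAuto guesses the language from the code itself; since the component already knows lang, an alternative is to highlight with an explicit language and only fall back to auto-detection. A sketch, assuming highlight.js v11's object-options signature:

import hljs from "highlight.js";

// Illustrative alternative to the highlightAuto call above.
function highlightWithFallback(code: string, lang: string): string {
  if (lang && hljs.getLanguage(lang)) {
    return hljs.highlight(code, { language: lang }).value;
  }
  return hljs.highlightAuto(code).value;
}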
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/ScrollToBottomBtn.svelte | <script lang="ts">
import { fade } from "svelte/transition";
import { onDestroy } from "svelte";
import IconChevron from "./icons/IconChevron.svelte";
export let scrollNode: HTMLElement;
export { className as class };
let visible = false;
let className = "";
let observer: ResizeObserver | null = null;
$: if (scrollNode) {
destroy();
if (window.ResizeObserver) {
observer = new ResizeObserver(() => {
updateVisibility();
});
observer.observe(scrollNode);
}
scrollNode.addEventListener("scroll", updateVisibility);
}
function updateVisibility() {
if (!scrollNode) return;
visible =
Math.ceil(scrollNode.scrollTop) + 200 < scrollNode.scrollHeight - scrollNode.clientHeight;
}
function destroy() {
observer?.disconnect();
scrollNode?.removeEventListener("scroll", updateVisibility);
}
onDestroy(destroy);
</script>
{#if visible}
<button
transition:fade|local={{ duration: 150 }}
on:click={() => scrollNode.scrollTo({ top: scrollNode.scrollHeight, behavior: "smooth" })}
class="btn absolute flex h-[41px] w-[41px] rounded-full border bg-white shadow-md transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:shadow-gray-950 dark:hover:bg-gray-600 {className}"
><IconChevron classNames="mt-[2px]" /></button
>
{/if}
| 0 |
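The component above watches both scroll events and content size: a ResizeObserver re-runs the visibility check because new messages change scrollHeight without firing a scroll event. The same pattern, condensed into a standalone helper (the name watchScrolledUp is illustrative):

// Show/hide logic for a "scroll to bottom" affordance, framework-free.
function watchScrolledUp(node: HTMLElement, onChange: (visible: boolean) => void): () => void {
  const update = () =>
    onChange(Math.ceil(node.scrollTop) + 200 < node.scrollHeight - node.clientHeight);
  const observer = new ResizeObserver(update);
  observer.observe(node); // content growth changes scrollHeight
  node.addEventListener("scroll", update, { passive: true });
  update();
  return () => {
    observer.disconnect();
    node.removeEventListener("scroll", update);
  };
}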
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/MobileNav.svelte | <script lang="ts">
import { navigating } from "$app/stores";
import { createEventDispatcher } from "svelte";
import { browser } from "$app/environment";
import { base } from "$app/paths";
import CarbonClose from "~icons/carbon/close";
import CarbonAdd from "~icons/carbon/add";
import CarbonTextAlignJustify from "~icons/carbon/text-align-justify";
export let isOpen = false;
export let title: string | undefined;
$: title = title || "New Chat";
let closeEl: HTMLButtonElement;
let openEl: HTMLButtonElement;
const dispatch = createEventDispatcher();
$: if ($navigating) {
dispatch("toggle", false);
}
$: if (isOpen && closeEl) {
closeEl.focus();
} else if (!isOpen && browser && document.activeElement === closeEl) {
openEl.focus();
}
</script>
<nav
class="flex h-12 items-center justify-between border-b bg-gray-50 px-4 dark:border-gray-800 dark:bg-gray-800/70 md:hidden"
>
<button
type="button"
class="-ml-3 flex h-9 w-9 shrink-0 items-center justify-center"
on:click={() => dispatch("toggle", true)}
aria-label="Open menu"
bind:this={openEl}><CarbonTextAlignJustify /></button
>
<span class="truncate px-4">{title}</span>
<a href={`${base}/`} class="-mr-3 flex h-9 w-9 shrink-0 items-center justify-center"
><CarbonAdd /></a
>
</nav>
<nav
class="fixed inset-0 z-30 grid max-h-screen grid-cols-1 grid-rows-[auto,auto,1fr,auto] bg-white bg-gradient-to-l from-gray-50 dark:bg-gray-900 dark:from-gray-800/30 {isOpen
? 'block'
: 'hidden'}"
>
<div class="flex h-12 items-center px-4">
<button
type="button"
class="-mr-3 ml-auto flex h-9 w-9 items-center justify-center"
on:click={() => dispatch("toggle", false)}
aria-label="Close menu"
bind:this={closeEl}><CarbonClose /></button
>
</div>
<slot />
</nav>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/components/Switch.svelte | <script lang="ts">
export let checked: boolean;
export let name: string;
</script>
<input bind:checked type="checkbox" {name} class="peer pointer-events-none absolute opacity-0" />
<div
on:click
on:keypress
class="relative inline-flex h-5 w-9 shrink-0 items-center rounded-full bg-gray-300 p-1 shadow-inner ring-gray-400 transition-all peer-checked:bg-blue-600 peer-focus-visible:ring peer-focus-visible:ring-offset-1 hover:bg-gray-400 dark:bg-gray-600 peer-checked:[&>div]:translate-x-3.5"
>
<div class="h-3.5 w-3.5 rounded-full bg-white shadow-sm transition-all" />
</div>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/chat/ChatMessages.svelte | <script lang="ts">
import type { Message } from "$lib/types/Message";
import { snapScrollToBottom } from "$lib/actions/snapScrollToBottom";
import ScrollToBottomBtn from "$lib/components/ScrollToBottomBtn.svelte";
import { tick } from "svelte";
import { randomUUID } from "$lib/utils/randomUuid";
import type { Model } from "$lib/types/Model";
import type { LayoutData } from "../../../routes/$types";
import ChatIntroduction from "./ChatIntroduction.svelte";
import ChatMessage from "./ChatMessage.svelte";
import type { WebSearchMessage } from "$lib/types/WebSearch";
export let messages: Message[];
export let loading: boolean;
export let pending: boolean;
export let isAuthor: boolean;
export let currentModel: Model;
export let settings: LayoutData["settings"];
export let models: Model[];
export let readOnly: boolean;
export let searches: Record<string, WebSearchMessage[]>;
let webSearchArray: Array<WebSearchMessage[] | undefined> = [];
let chatContainer: HTMLElement;
export let webSearchMessages: WebSearchMessage[] = [];
async function scrollToBottom() {
await tick();
chatContainer.scrollTop = chatContainer.scrollHeight;
}
// If last message is from user, scroll to bottom
$: if (messages[messages.length - 1]?.from === "user") {
scrollToBottom();
}
$: messages,
(webSearchArray = messages.map((message, idx) => {
if (message.webSearchId) {
return searches[message.webSearchId] ?? [];
} else if (idx === messages.length - 1) {
return webSearchMessages;
} else {
return [];
}
}));
</script>
<div
class="scrollbar-custom mr-1 h-full overflow-y-auto"
use:snapScrollToBottom={messages.length ? [...messages, ...webSearchMessages] : false}
bind:this={chatContainer}
>
<div class="mx-auto flex h-full max-w-3xl flex-col gap-6 px-5 pt-6 sm:gap-8 xl:max-w-4xl">
{#each messages as message, i}
<ChatMessage
loading={loading && i === messages.length - 1}
{message}
{isAuthor}
{readOnly}
model={currentModel}
webSearchMessages={webSearchArray[i]}
on:retry
on:vote
/>
{:else}
<ChatIntroduction {settings} {models} {currentModel} on:message />
{/each}
{#if pending}
<ChatMessage
message={{ from: "assistant", content: "", id: randomUUID() }}
model={currentModel}
{webSearchMessages}
/>
{/if}
<div class="h-44 flex-none" />
</div>
<ScrollToBottomBtn
class="bottom-36 right-4 max-md:hidden lg:right-10"
scrollNode={chatContainer}
/>
</div>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/chat/ChatWindow.svelte | <script lang="ts">
import type { Message } from "$lib/types/Message";
import { createEventDispatcher } from "svelte";
import CarbonSendAltFilled from "~icons/carbon/send-alt-filled";
import CarbonExport from "~icons/carbon/export";
import CarbonStopFilledAlt from "~icons/carbon/stop-filled-alt";
import EosIconsLoading from "~icons/eos-icons/loading";
import ChatMessages from "./ChatMessages.svelte";
import ChatInput from "./ChatInput.svelte";
import StopGeneratingBtn from "../StopGeneratingBtn.svelte";
import type { Model } from "$lib/types/Model";
import type { LayoutData } from "../../../routes/$types";
import WebSearchToggle from "../WebSearchToggle.svelte";
import type { WebSearchMessage } from "$lib/types/WebSearch";
import LoginModal from "../LoginModal.svelte";
export let messages: Message[] = [];
export let loading = false;
export let pending = false;
export let shared = false;
export let currentModel: Model;
export let models: Model[];
export let settings: LayoutData["settings"];
export let webSearchMessages: WebSearchMessage[] = [];
export let searches: Record<string, WebSearchMessage[]> = {};
export let loginRequired = false;
$: isReadOnly = !models.some((model) => model.id === currentModel.id);
let loginModalOpen = false;
let message: string;
const dispatch = createEventDispatcher<{
message: string;
share: void;
stop: void;
retry: { id: Message["id"]; content: string };
}>();
const handleSubmit = () => {
if (loading) return;
dispatch("message", message);
message = "";
};
</script>
<div class="relative min-h-0 min-w-0">
{#if loginModalOpen}
<LoginModal {settings} on:close={() => (loginModalOpen = false)} />
{/if}
<ChatMessages
{loading}
{pending}
{settings}
{currentModel}
{models}
{messages}
readOnly={isReadOnly}
isAuthor={!shared}
{webSearchMessages}
{searches}
on:message
on:vote
on:retry={(ev) => {
if (!loading) dispatch("retry", ev.detail);
}}
/>
<div
class="dark:via-gray-80 pointer-events-none absolute inset-x-0 bottom-0 z-0 mx-auto flex w-full max-w-3xl flex-col items-center justify-center bg-gradient-to-t from-white via-white/80 to-white/0 px-3.5 py-4 dark:border-gray-800 dark:from-gray-900 dark:to-gray-900/0 max-md:border-t max-md:bg-white max-md:dark:bg-gray-900 sm:px-5 md:py-8 xl:max-w-4xl [&>*]:pointer-events-auto"
>
<div class="flex w-full pb-3 max-md:justify-between">
{#if settings?.searchEnabled}
<WebSearchToggle />
{/if}
{#if loading}
<StopGeneratingBtn
classNames={settings?.searchEnabled ? "md:-translate-x-1/2 md:mx-auto" : "mx-auto"}
on:click={() => dispatch("stop")}
/>
{/if}
</div>
<form
on:submit|preventDefault={handleSubmit}
class="relative flex w-full max-w-4xl flex-1 items-center rounded-xl border bg-gray-100 focus-within:border-gray-300 dark:border-gray-600 dark:bg-gray-700 dark:focus-within:border-gray-500
{isReadOnly ? 'opacity-30' : ''}"
>
<div class="flex w-full flex-1 border-none bg-transparent">
<ChatInput
placeholder="Ask anything"
bind:value={message}
on:submit={handleSubmit}
on:keypress={() => {
if (loginRequired) loginModalOpen = true;
}}
maxRows={4}
disabled={isReadOnly}
/>
{#if loading}
<button
class="btn mx-1 my-1 inline-block h-[2.4rem] self-end rounded-lg bg-transparent p-1 px-[0.7rem] text-gray-400 disabled:opacity-60 enabled:hover:text-gray-700 dark:disabled:opacity-40 enabled:dark:hover:text-gray-100 md:hidden"
on:click={() => dispatch("stop")}
>
<CarbonStopFilledAlt />
</button>
<div
class="mx-1 my-1 hidden h-[2.4rem] items-center p-1 px-[0.7rem] text-gray-400 disabled:opacity-60 enabled:hover:text-gray-700 dark:disabled:opacity-40 enabled:dark:hover:text-gray-100 md:flex"
>
<EosIconsLoading />
</div>
{:else}
<button
class="btn mx-1 my-1 h-[2.4rem] self-end rounded-lg bg-transparent p-1 px-[0.7rem] text-gray-400 disabled:opacity-60 enabled:hover:text-gray-700 dark:disabled:opacity-40 enabled:dark:hover:text-gray-100"
disabled={!message || isReadOnly}
type="submit"
>
<CarbonSendAltFilled />
</button>
{/if}
</div>
</form>
<div class="mt-2 flex justify-between self-stretch px-1 text-xs text-gray-400/90 max-sm:gap-2">
<p>
Model: <a
href="https://huggingface.co/{currentModel.name}"
target="_blank"
rel="noreferrer"
class="hover:underline">{currentModel.displayName}</a
> <span class="max-sm:hidden">·</span><br class="sm:hidden" /> Generated content may be inaccurate
or false.
</p>
{#if messages.length}
<button
class="flex flex-none items-center hover:text-gray-400 hover:underline max-sm:rounded-lg max-sm:bg-gray-50 max-sm:px-2.5 dark:max-sm:bg-gray-800"
type="button"
on:click={() => dispatch("share")}
>
<CarbonExport class="text-[.6rem] sm:mr-1.5 sm:text-primary-500" />
<div class="max-sm:hidden">Share this conversation</div>
</button>
{/if}
</div>
</div>
</div>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/chat/ChatIntroduction.svelte | <script lang="ts">
import { PUBLIC_APP_NAME, PUBLIC_VERSION } from "$env/static/public";
import { PUBLIC_ANNOUNCEMENT_BANNERS } from "$env/static/public";
import Logo from "$lib/components/icons/Logo.svelte";
import { createEventDispatcher } from "svelte";
import IconChevron from "$lib/components/icons/IconChevron.svelte";
import CarbonArrowUpRight from "~icons/carbon/arrow-up-right";
import AnnouncementBanner from "../AnnouncementBanner.svelte";
import ModelsModal from "../ModelsModal.svelte";
import type { Model } from "$lib/types/Model";
import ModelCardMetadata from "../ModelCardMetadata.svelte";
import type { LayoutData } from "../../../routes/$types";
import { findCurrentModel } from "$lib/utils/models";
export let currentModel: Model;
export let settings: LayoutData["settings"];
export let models: Model[];
let isModelsModalOpen = false;
$: currentModelMetadata = findCurrentModel(models, settings.activeModel);
const announcementBanners = PUBLIC_ANNOUNCEMENT_BANNERS
? JSON.parse(PUBLIC_ANNOUNCEMENT_BANNERS)
: [];
const dispatch = createEventDispatcher<{ message: string }>();
</script>
<div class="my-auto grid gap-8 lg:grid-cols-3">
<div class="lg:col-span-1">
<div>
<div class="mb-3 flex items-center text-2xl font-semibold">
<Logo classNames="mr-1 flex-none" />
{PUBLIC_APP_NAME}
<div
class="ml-3 flex h-6 items-center rounded-lg border border-gray-100 bg-gray-50 px-2 text-base text-gray-400 dark:border-gray-700/60 dark:bg-gray-800"
>
v{PUBLIC_VERSION}
</div>
</div>
<p class="text-base text-gray-600 dark:text-gray-400">
Making the community's best AI chat models available to everyone.
</p>
</div>
</div>
<div class="lg:col-span-2 lg:pl-24">
{#each announcementBanners as banner}
<AnnouncementBanner classNames="mb-4" title={banner.title}>
<a
target="_blank"
href={banner.linkHref}
class="mr-2 flex items-center underline hover:no-underline"
><CarbonArrowUpRight class="mr-1.5 text-xs" /> {banner.linkTitle}</a
>
</AnnouncementBanner>
{/each}
{#if isModelsModalOpen}
<ModelsModal {settings} {models} on:close={() => (isModelsModalOpen = false)} />
{/if}
<div class="overflow-hidden rounded-xl border dark:border-gray-800">
<div class="flex p-3">
<div>
<div class="text-sm text-gray-600 dark:text-gray-400">Current Model</div>
<div class="font-semibold">{currentModel.displayName}</div>
</div>
{#if models.length > 1}
<button
type="button"
on:click={() => (isModelsModalOpen = true)}
class="btn ml-auto flex h-7 w-7 self-start rounded-full bg-gray-100 p-1 text-xs hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-600"
><IconChevron /></button
>
{/if}
</div>
<ModelCardMetadata variant="dark" model={currentModel} />
</div>
</div>
{#if currentModelMetadata.promptExamples}
<div class="lg:col-span-3 lg:mt-12">
<p class="mb-3 text-gray-600 dark:text-gray-300">Examples</p>
<div class="grid gap-3 lg:grid-cols-3 lg:gap-5">
{#each currentModelMetadata.promptExamples as example}
<button
type="button"
class="rounded-xl border bg-gray-50 p-2.5 text-gray-600 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-300 dark:hover:bg-gray-700 sm:p-4"
on:click={() => dispatch("message", example.prompt)}
>
{example.title}
</button>
{/each}
</div>
</div>{/if}
</div>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/chat/ChatMessage.svelte | <script lang="ts">
import { marked } from "marked";
import type { Message } from "$lib/types/Message";
import { afterUpdate, createEventDispatcher } from "svelte";
import { deepestChild } from "$lib/utils/deepestChild";
import { page } from "$app/stores";
import CodeBlock from "../CodeBlock.svelte";
import IconLoading from "../icons/IconLoading.svelte";
import CarbonRotate360 from "~icons/carbon/rotate-360";
import CarbonDownload from "~icons/carbon/download";
import CarbonThumbsUp from "~icons/carbon/thumbs-up";
import CarbonThumbsDown from "~icons/carbon/thumbs-down";
import { PUBLIC_SEP_TOKEN } from "$lib/constants/publicSepToken";
import type { Model } from "$lib/types/Model";
import type { WebSearchMessage } from "$lib/types/WebSearch";
import OpenWebSearchResults from "../OpenWebSearchResults.svelte";
function sanitizeMd(md: string) {
let ret = md
.replace(/<\|[a-z]*$/, "")
.replace(/<\|[a-z]+\|$/, "")
.replace(/<$/, "")
.replaceAll(PUBLIC_SEP_TOKEN, " ")
.replaceAll(/<\|[a-z]+\|>/g, " ")
.replaceAll(/<br\s?\/?>/gi, "\n")
.replaceAll("<", "<")
.trim();
for (const stop of [...(model.parameters?.stop ?? []), "<|endoftext|>"]) {
if (ret.endsWith(stop)) {
ret = ret.slice(0, -stop.length).trim();
}
}
return ret;
}
function unsanitizeMd(md: string) {
return md.replaceAll("&lt;", "<");
}
export let model: Model;
export let message: Message;
export let loading = false;
export let isAuthor = true;
export let readOnly = false;
export let isTapped = false;
export let webSearchMessages: WebSearchMessage[] = [];
const dispatch = createEventDispatcher<{
retry: { content: string; id: Message["id"] };
vote: { score: Message["score"]; id: Message["id"] };
}>();
let contentEl: HTMLElement;
let loadingEl: IconLoading;
let pendingTimeout: ReturnType<typeof setTimeout>;
const renderer = new marked.Renderer();
// For code blocks with simple backticks
renderer.codespan = (code) => {
// Unsanitize double-sanitized code
return `<code>${code.replaceAll("&amp;", "&")}</code>`;
};
const options: marked.MarkedOptions = {
...marked.getDefaults(),
gfm: true,
breaks: true,
renderer,
};
$: tokens = marked.lexer(sanitizeMd(message.content));
afterUpdate(() => {
loadingEl?.$destroy();
clearTimeout(pendingTimeout);
// Add loading animation to the last message if update takes more than 600ms
if (loading) {
pendingTimeout = setTimeout(() => {
if (contentEl) {
loadingEl = new IconLoading({
target: deepestChild(contentEl),
props: { classNames: "loading inline ml-2" },
});
}
}, 600);
}
});
$: downloadLink =
message.from === "user" ? `${$page.url.pathname}/message/${message.id}/prompt` : undefined;
let webSearchIsDone = true;
$: webSearchIsDone =
webSearchMessages.length > 0 &&
webSearchMessages[webSearchMessages.length - 1].type === "result";
</script>
{#if message.from === "assistant"}
<div
class="group relative -mb-8 flex items-start justify-start gap-4 pb-8 leading-relaxed"
on:click={() => (isTapped = !isTapped)}
on:keypress={() => (isTapped = !isTapped)}
>
<img
alt=""
src="https://huggingface.co/avatars/2edb18bd0206c16b433841a47f53fa8e.svg"
class="mt-5 h-3 w-3 flex-none select-none rounded-full shadow-lg"
/>
<div
class="relative min-h-[calc(2rem+theme(spacing[3.5])*2)] min-w-[60px] break-words rounded-2xl border border-gray-100 bg-gradient-to-br from-gray-50 px-5 py-3.5 text-gray-600 prose-pre:my-2 dark:border-gray-800 dark:from-gray-800/40 dark:text-gray-300"
>
{#if webSearchMessages && webSearchMessages.length > 0}
<OpenWebSearchResults
classNames={tokens.length ? "mb-3.5" : ""}
{webSearchMessages}
loading={!webSearchIsDone}
/>
{/if}
{#if !message.content && (webSearchIsDone || (webSearchMessages && webSearchMessages.length === 0))}
<IconLoading />
{/if}
<div
class="prose max-w-none dark:prose-invert max-sm:prose-sm prose-headings:font-semibold prose-h1:text-lg prose-h2:text-base prose-h3:text-base prose-pre:bg-gray-800 dark:prose-pre:bg-gray-900"
bind:this={contentEl}
>
{#each tokens as token}
{#if token.type === "code"}
<CodeBlock lang={token.lang} code={unsanitizeMd(token.text)} />
{:else}
<!-- eslint-disable-next-line svelte/no-at-html-tags -->
{@html marked(token.raw, options)}
{/if}
{/each}
</div>
</div>
{#if isAuthor && !loading && message.content}
<div
class="absolute bottom-1 right-0 flex max-md:transition-all md:bottom-0 md:group-hover:visible md:group-hover:opacity-100
{message.score ? 'visible opacity-100' : 'invisible max-md:-translate-y-4 max-md:opacity-0'}
{isTapped ? 'max-md:visible max-md:translate-y-0 max-md:opacity-100' : ''}
"
>
<button
class="btn rounded-sm p-1 text-sm text-gray-400 focus:ring-0 hover:text-gray-500 dark:text-gray-400 dark:hover:text-gray-300
{message.score && message.score > 0
? 'text-green-500 hover:text-green-500 dark:text-green-400 hover:dark:text-green-400'
: ''}"
title={message.score === 1 ? "Remove +1" : "+1"}
type="button"
on:click={() => dispatch("vote", { score: message.score === 1 ? 0 : 1, id: message.id })}
>
<CarbonThumbsUp class="h-[1.14em] w-[1.14em]" />
</button>
<button
class="btn rounded-sm p-1 text-sm text-gray-400 focus:ring-0 hover:text-gray-500 dark:text-gray-400 dark:hover:text-gray-300
{message.score && message.score < 0
? 'text-red-500 hover:text-red-500 dark:text-red-400 hover:dark:text-red-400'
: ''}"
title={message.score === -1 ? "Remove -1" : "-1"}
type="button"
on:click={() =>
dispatch("vote", { score: message.score === -1 ? 0 : -1, id: message.id })}
>
<CarbonThumbsDown class="h-[1.14em] w-[1.14em]" />
</button>
</div>
{/if}
</div>
{/if}
{#if message.from === "user"}
<div class="group relative flex items-start justify-start gap-4 max-sm:text-sm">
<div class="mt-5 h-3 w-3 flex-none rounded-full" />
<div
class="max-w-full whitespace-break-spaces break-words rounded-2xl px-5 py-3.5 text-gray-500 dark:text-gray-400"
>
{message.content.trim()}
</div>
{#if !loading}
<div class="absolute right-0 top-3.5 flex gap-2 lg:-right-2">
{#if downloadLink}
<a
class="rounded-lg border border-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 dark:border-gray-800 dark:text-gray-400 dark:hover:text-gray-300 md:hidden"
title="Download prompt and parameters"
type="button"
target="_blank"
href={downloadLink}
>
<CarbonDownload />
</a>
{/if}
{#if !readOnly}
<button
class="cursor-pointer rounded-lg border border-gray-100 p-1 text-xs text-gray-400 group-hover:block hover:text-gray-500 dark:border-gray-800 dark:text-gray-400 dark:hover:text-gray-300 md:hidden lg:-right-2"
title="Retry"
type="button"
on:click={() => dispatch("retry", { content: message.content, id: message.id })}
>
<CarbonRotate360 />
</button>
{/if}
</div>
{/if}
</div>
{/if}
| 0 |
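sanitizeMd above escapes < to &lt; (and strips stop tokens) before the text reaches marked and {@html}, so model output cannot inject markup; unsanitizeMd reverses the escaping for code blocks, which are rendered as plain text rather than HTML. A quick illustration of the round trip (the sample string is made up):

const raw = "if (a < b) { return; }";
const sanitized = raw.replaceAll("<", "&lt;"); // what sanitizeMd does to markup
const restored = sanitized.replaceAll("&lt;", "<"); // what unsanitizeMd does for code blocks
console.log(restored === raw); // true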
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/chat/ChatInput.svelte | <script lang="ts">
import { createEventDispatcher, onMount } from "svelte";
export let value = "";
export let minRows = 1;
export let maxRows: null | number = null;
export let placeholder = "";
export let disabled = false;
// Approximate width from which we disable autofocus
const TABLET_VIEWPORT_WIDTH = 768;
let innerWidth = 0;
let textareaElement: HTMLTextAreaElement;
const dispatch = createEventDispatcher<{ submit: void }>();
$: minHeight = `${1 + minRows * 1.5}em`;
$: maxHeight = maxRows ? `${1 + maxRows * 1.5}em` : `auto`;
function handleKeydown(event: KeyboardEvent) {
// submit on enter
if (event.key === "Enter" && !event.shiftKey) {
event.preventDefault();
dispatch("submit"); // use a custom event instead of `event.target.form.requestSubmit()` as it does not work on Safari 14
}
}
onMount(() => {
if (innerWidth > TABLET_VIEWPORT_WIDTH) {
textareaElement.focus();
}
});
</script>
<svelte:window bind:innerWidth />
<div class="relative min-w-0 flex-1">
<pre
class="scrollbar-custom invisible overflow-x-hidden overflow-y-scroll whitespace-pre-wrap break-words p-3"
aria-hidden="true"
style="min-height: {minHeight}; max-height: {maxHeight}">{(value || " ") + "\n"}</pre>
<textarea
enterkeyhint="send"
tabindex="0"
rows="1"
class="scrollbar-custom absolute top-0 m-0 h-full w-full resize-none scroll-p-3 overflow-x-hidden overflow-y-scroll border-0 bg-transparent p-3 outline-none focus:ring-0 focus-visible:ring-0"
bind:value
bind:this={textareaElement}
{disabled}
on:keydown={handleKeydown}
on:keypress
{placeholder}
/>
</div>
<style>
pre,
textarea {
font-family: inherit;
box-sizing: border-box;
line-height: 1.5;
}
</style>
| 0 |
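The invisible pre above mirrors the textarea's value (plus a trailing newline so an empty last line still counts), which makes the wrapper grow with the text while the absolutely positioned textarea simply fills it. A framework-free sketch of that mirror trick; wireAutoGrow is an illustrative name, and Svelte's bind:value handles the syncing in the component itself:

// Auto-growing textarea via a mirrored element (illustrative sketch).
function wireAutoGrow(textarea: HTMLTextAreaElement, mirror: HTMLElement): () => void {
  const sync = () => {
    // Trailing "\n" keeps the final empty line from collapsing.
    mirror.textContent = (textarea.value || " ") + "\n";
  };
  textarea.addEventListener("input", sync);
  sync();
  return () => textarea.removeEventListener("input", sync);
}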
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/icons/IconChevron.svelte | <script lang="ts">
export let classNames = "";
</script>
<svg
width="1em"
height="1em"
viewBox="0 0 15 6"
class={classNames}
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M1.67236 1L7.67236 7L13.6724 1"
stroke="currentColor"
stroke-width="2"
stroke-linecap="round"
stroke-linejoin="round"
/>
</svg>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/icons/IconLoading.svelte | <script lang="ts">
export let classNames = "";
</script>
<div class={"inline-flex h-8 flex-none items-center gap-1 " + classNames}>
<div
class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400"
style="animation-delay: 0.25s;"
/>
<div
class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400"
style="animation-delay: 0.5s;"
/>
<div
class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400"
style="animation-delay: 0.75s;"
/>
</div>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/icons/LogoHuggingFaceBorderless.svelte | <script lang="ts">
export let classNames = "";
</script>
<svg
class={classNames}
xmlns="http://www.w3.org/2000/svg"
width="1em"
height="1em"
fill="none"
viewBox="0 0 95 88"
>
<path fill="#FFD21E" d="M47.21 76.5a34.75 34.75 0 1 0 0-69.5 34.75 34.75 0 0 0 0 69.5Z" />
<path
fill="#FF9D0B"
d="M81.96 41.75a34.75 34.75 0 1 0-69.5 0 34.75 34.75 0 0 0 69.5 0Zm-73.5 0a38.75 38.75 0 1 1 77.5 0 38.75 38.75 0 0 1-77.5 0Z"
/>
<path
fill="#3A3B45"
d="M58.5 32.3c1.28.44 1.78 3.06 3.07 2.38a5 5 0 1 0-6.76-2.07c.61 1.15 2.55-.72 3.7-.32ZM34.95 32.3c-1.28.44-1.79 3.06-3.07 2.38a5 5 0 1 1 6.76-2.07c-.61 1.15-2.56-.72-3.7-.32ZM46.96 56.29c9.83 0 13-8.76 13-13.26 0-2.34-1.57-1.6-4.09-.36-2.33 1.15-5.46 2.74-8.9 2.74-7.19 0-13-6.88-13-2.38s3.16 13.26 13 13.26Z"
/>
<mask id="a" width="27" height="16" x="33" y="41" maskUnits="userSpaceOnUse">
<path
fill="#fff"
d="M46.96 56.29c9.83 0 13-8.76 13-13.26 0-2.34-1.57-1.6-4.09-.36-2.33 1.15-5.46 2.74-8.9 2.74-7.19 0-13-6.88-13-2.38s3.16 13.26 13 13.26Z"
/>
</mask>
<g mask="url(#a)">
<path
fill="#F94040"
d="M47.21 66.5a8.67 8.67 0 0 0 2.65-16.94c-.84-.26-1.73 2.6-2.65 2.6-.86 0-1.7-2.88-2.48-2.65a8.68 8.68 0 0 0 2.48 16.99Z"
/>
</g>
<path
fill="#FF9D0B"
d="M70.71 37a3.25 3.25 0 1 0 0-6.5 3.25 3.25 0 0 0 0 6.5ZM24.21 37a3.25 3.25 0 1 0 0-6.5 3.25 3.25 0 0 0 0 6.5ZM17.52 48c-1.62 0-3.06.66-4.07 1.87a5.97 5.97 0 0 0-1.33 3.76 7.1 7.1 0 0 0-1.94-.3c-1.55 0-2.95.59-3.94 1.66a5.8 5.8 0 0 0-.8 7 5.3 5.3 0 0 0-1.79 2.82c-.24.9-.48 2.8.8 4.74a5.22 5.22 0 0 0-.37 5.02c1.02 2.32 3.57 4.14 8.52 6.1 3.07 1.22 5.89 2 5.91 2.01a44.33 44.33 0 0 0 10.93 1.6c5.86 0 10.05-1.8 12.46-5.34 3.88-5.69 3.33-10.9-1.7-15.92-2.77-2.78-4.62-6.87-5-7.77-.78-2.66-2.84-5.62-6.25-5.62a5.7 5.7 0 0 0-4.6 2.46c-1-1.26-1.98-2.25-2.86-2.82A7.4 7.4 0 0 0 17.52 48Zm0 4c.51 0 1.14.22 1.82.65 2.14 1.36 6.25 8.43 7.76 11.18.5.92 1.37 1.31 2.14 1.31 1.55 0 2.75-1.53.15-3.48-3.92-2.93-2.55-7.72-.68-8.01.08-.02.17-.02.24-.02 1.7 0 2.45 2.93 2.45 2.93s2.2 5.52 5.98 9.3c3.77 3.77 3.97 6.8 1.22 10.83-1.88 2.75-5.47 3.58-9.16 3.58-3.81 0-7.73-.9-9.92-1.46-.11-.03-13.45-3.8-11.76-7 .28-.54.75-.76 1.34-.76 2.38 0 6.7 3.54 8.57 3.54.41 0 .7-.17.83-.6.79-2.85-12.06-4.05-10.98-8.17.2-.73.71-1.02 1.44-1.02 3.14 0 10.2 5.53 11.68 5.53.11 0 .2-.03.24-.1.74-1.2.33-2.04-4.9-5.2-5.21-3.16-8.88-5.06-6.8-7.33.24-.26.58-.38 1-.38 3.17 0 10.66 6.82 10.66 6.82s2.02 2.1 3.25 2.1c.28 0 .52-.1.68-.38.86-1.46-8.06-8.22-8.56-11.01-.34-1.9.24-2.85 1.31-2.85Z"
/>
<path
fill="#FFD21E"
d="M38.6 76.69c2.75-4.04 2.55-7.07-1.22-10.84-3.78-3.77-5.98-9.3-5.98-9.3s-.82-3.2-2.69-2.9c-1.87.3-3.24 5.08.68 8.01 3.91 2.93-.78 4.92-2.29 2.17-1.5-2.75-5.62-9.82-7.76-11.18-2.13-1.35-3.63-.6-3.13 2.2.5 2.79 9.43 9.55 8.56 11-.87 1.47-3.93-1.71-3.93-1.71s-9.57-8.71-11.66-6.44c-2.08 2.27 1.59 4.17 6.8 7.33 5.23 3.16 5.64 4 4.9 5.2-.75 1.2-12.28-8.53-13.36-4.4-1.08 4.11 11.77 5.3 10.98 8.15-.8 2.85-9.06-5.38-10.74-2.18-1.7 3.21 11.65 6.98 11.76 7.01 4.3 1.12 15.25 3.49 19.08-2.12Z"
/>
<path
fill="#FF9D0B"
d="M77.4 48c1.62 0 3.07.66 4.07 1.87a5.97 5.97 0 0 1 1.33 3.76 7.1 7.1 0 0 1 1.95-.3c1.55 0 2.95.59 3.94 1.66a5.8 5.8 0 0 1 .8 7 5.3 5.3 0 0 1 1.78 2.82c.24.9.48 2.8-.8 4.74a5.22 5.22 0 0 1 .37 5.02c-1.02 2.32-3.57 4.14-8.51 6.1-3.08 1.22-5.9 2-5.92 2.01a44.33 44.33 0 0 1-10.93 1.6c-5.86 0-10.05-1.8-12.46-5.34-3.88-5.69-3.33-10.9 1.7-15.92 2.78-2.78 4.63-6.87 5.01-7.77.78-2.66 2.83-5.62 6.24-5.62a5.7 5.7 0 0 1 4.6 2.46c1-1.26 1.98-2.25 2.87-2.82A7.4 7.4 0 0 1 77.4 48Zm0 4c-.51 0-1.13.22-1.82.65-2.13 1.36-6.25 8.43-7.76 11.18a2.43 2.43 0 0 1-2.14 1.31c-1.54 0-2.75-1.53-.14-3.48 3.91-2.93 2.54-7.72.67-8.01a1.54 1.54 0 0 0-.24-.02c-1.7 0-2.45 2.93-2.45 2.93s-2.2 5.52-5.97 9.3c-3.78 3.77-3.98 6.8-1.22 10.83 1.87 2.75 5.47 3.58 9.15 3.58 3.82 0 7.73-.9 9.93-1.46.1-.03 13.45-3.8 11.76-7-.29-.54-.75-.76-1.34-.76-2.38 0-6.71 3.54-8.57 3.54-.42 0-.71-.17-.83-.6-.8-2.85 12.05-4.05 10.97-8.17-.19-.73-.7-1.02-1.44-1.02-3.14 0-10.2 5.53-11.68 5.53-.1 0-.19-.03-.23-.1-.74-1.2-.34-2.04 4.88-5.2 5.23-3.16 8.9-5.06 6.8-7.33-.23-.26-.57-.38-.98-.38-3.18 0-10.67 6.82-10.67 6.82s-2.02 2.1-3.24 2.1a.74.74 0 0 1-.68-.38c-.87-1.46 8.05-8.22 8.55-11.01.34-1.9-.24-2.85-1.31-2.85Z"
/>
<path
fill="#FFD21E"
d="M56.33 76.69c-2.75-4.04-2.56-7.07 1.22-10.84 3.77-3.77 5.97-9.3 5.97-9.3s.82-3.2 2.7-2.9c1.86.3 3.23 5.08-.68 8.01-3.92 2.93.78 4.92 2.28 2.17 1.51-2.75 5.63-9.82 7.76-11.18 2.13-1.35 3.64-.6 3.13 2.2-.5 2.79-9.42 9.55-8.55 11 .86 1.47 3.92-1.71 3.92-1.71s9.58-8.71 11.66-6.44c2.08 2.27-1.58 4.17-6.8 7.33-5.23 3.16-5.63 4-4.9 5.2.75 1.2 12.28-8.53 13.36-4.4 1.08 4.11-11.76 5.3-10.97 8.15.8 2.85 9.05-5.38 10.74-2.18 1.69 3.21-11.65 6.98-11.76 7.01-4.31 1.12-15.26 3.49-19.08-2.12Z"
/>
</svg>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/icons/Logo.svelte | <script lang="ts">
import { page } from "$app/stores";
import { PUBLIC_APP_ASSETS, PUBLIC_APP_NAME, PUBLIC_ORIGIN } from "$env/static/public";
import { base } from "$app/paths";
export let classNames = "";
</script>
{#if PUBLIC_APP_ASSETS === "chatui"}
<svg
height="30"
width="30"
viewBox="0 0 30 30"
xmlns="http://www.w3.org/2000/svg"
class={classNames}
>
<path
d="M4.06151 14.1464C4.06151 11.8818 4.9611 9.71004 6.56237 8.10877C8.16364 6.5075 10.3354 5.60791 12.6 5.60791H16.5231C18.6254 5.60791 20.6416 6.44307 22.1282 7.92965C23.6148 9.41624 24.45 11.4325 24.45 13.5348C24.45 15.6372 23.6148 17.6534 22.1282 19.14C20.6416 20.6266 18.6254 21.4618 16.5231 21.4618H7.08459L4.63844 23.8387C4.59547 23.8942 4.53557 23.9343 4.4678 23.9527C4.40004 23.9712 4.32811 23.9671 4.2629 23.941C4.1977 23.9149 4.14276 23.8683 4.10643 23.8082C4.07009 23.7481 4.05432 23.6778 4.06151 23.6079V14.1464Z"
class="fill-primary-500"
/>
</svg>
{:else}
<object
class={classNames}
data="{PUBLIC_ORIGIN || $page.url.origin}{base}/{PUBLIC_APP_ASSETS}/favicon.svg"
title="{PUBLIC_APP_NAME} logo"
/>
{/if}
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/icons/IconDazzled.svelte | <script lang="ts">
export let classNames = "";
</script>
<svg
xmlns="http://www.w3.org/2000/svg"
width="1em"
height="1em"
class={classNames}
fill="none"
viewBox="0 0 26 23"
>
<path
fill="url(#a)"
d="M.93 10.65A10.17 10.17 0 0 1 11.11.48h4.67a9.45 9.45 0 0 1 0 18.89H4.53L1.62 22.2a.38.38 0 0 1-.69-.28V10.65Z"
/>
<path
fill="#000"
fill-rule="evenodd"
d="M11.52 7.4a1.86 1.86 0 1 1-3.72 0 1.86 1.86 0 0 1 3.72 0Zm7.57 0a1.86 1.86 0 1 1-3.73 0 1.86 1.86 0 0 1 3.73 0ZM8.9 12.9a.55.55 0 0 0-.11.35.76.76 0 0 1-1.51 0c0-.95.67-1.94 1.76-1.94 1.09 0 1.76 1 1.76 1.94H9.3a.55.55 0 0 0-.12-.35c-.06-.07-.1-.08-.13-.08s-.08 0-.14.08Zm4.04 0a.55.55 0 0 0-.12.35h-1.51c0-.95.68-1.94 1.76-1.94 1.1 0 1.77 1 1.77 1.94h-1.51a.55.55 0 0 0-.12-.35c-.06-.07-.11-.08-.14-.08-.02 0-.07 0-.13.08Zm-1.89.79c-.02 0-.07-.01-.13-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.75 1.95 1.1 0 1.77-1 1.77-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.14.08Zm4.04 0c-.03 0-.08-.01-.14-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.76 1.95 1.08 0 1.76-1 1.76-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.13.08Zm1.76-.44c0-.16.05-.28.12-.35.06-.07.1-.08.13-.08s.08 0 .14.08c.06.07.11.2.11.35a.76.76 0 0 0 1.51 0c0-.95-.67-1.94-1.76-1.94-1.09 0-1.76 1-1.76 1.94h1.5Z"
clip-rule="evenodd"
/>
<defs>
<radialGradient
id="a"
cx="0"
cy="0"
r="1"
gradientTransform="matrix(0 31.37 -34.85 0 13.08 -9.02)"
gradientUnits="userSpaceOnUse"
>
<stop stop-color="#FFD21E" />
<stop offset="1" stop-color="red" />
</radialGradient>
</defs>
</svg>
| 0 |
hf_public_repos/chat-ui/src/lib/components | hf_public_repos/chat-ui/src/lib/components/icons/IconCopy.svelte | <script lang="ts">
export let classNames = "";
</script>
<svg
class={classNames}
xmlns="http://www.w3.org/2000/svg"
aria-hidden="true"
fill="currentColor"
focusable="false"
role="img"
width="1em"
height="1em"
preserveAspectRatio="xMidYMid meet"
viewBox="0 0 32 32"
>
<path
d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z"
transform="translate(0)"
/>
<path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)" /><rect
fill="none"
width="32"
height="32"
/>
</svg>
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/server/abortedGenerations.ts | // Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
import { setTimeout } from "node:timers/promises";
import { collections } from "./database";
let closed = false;
process.on("SIGINT", () => {
closed = true;
});
export let abortedGenerations: Map<string, Date> = new Map();
async function maintainAbortedGenerations() {
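	// Poll the DB every second and keep an in-memory snapshot so request
	// handlers can synchronously check whether a generation was aborted.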
while (!closed) {
await setTimeout(1000);
try {
const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray();
abortedGenerations = new Map(
aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt])
);
} catch (err) {
console.error(err);
}
}
}
maintainAbortedGenerations();
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/server/auth.ts | import { Issuer, BaseClient, type UserinfoResponse, TokenSet } from "openid-client";
import { addHours, addYears } from "date-fns";
import {
COOKIE_NAME,
OPENID_CLIENT_ID,
OPENID_CLIENT_SECRET,
OPENID_PROVIDER_URL,
OPENID_SCOPES,
} from "$env/static/private";
import { sha256 } from "$lib/utils/sha256";
import { z } from "zod";
import { dev } from "$app/environment";
import type { Cookies } from "@sveltejs/kit";
export interface OIDCSettings {
redirectURI: string;
}
export interface OIDCUserInfo {
token: TokenSet;
userData: UserinfoResponse;
}
export const requiresUser = !!OPENID_CLIENT_ID && !!OPENID_CLIENT_SECRET;
export function refreshSessionCookie(cookies: Cookies, sessionId: string) {
cookies.set(COOKIE_NAME, sessionId, {
path: "/",
// So that it works inside the space's iframe
sameSite: dev ? "lax" : "none",
secure: !dev,
httpOnly: true,
expires: addYears(new Date(), 1),
});
}
export const authCondition = (locals: App.Locals) => {
return locals.user
? { userId: locals.user._id }
: { sessionId: locals.sessionId, userId: { $exists: false } };
};
/**
* Generates a CSRF token using the user sessionId. Note that we don't need a secret because sessionId is enough.
*/
export async function generateCsrfToken(sessionId: string, redirectUrl: string): Promise<string> {
const data = {
expiration: addHours(new Date(), 1).getTime(),
redirectUrl,
};
return Buffer.from(
JSON.stringify({
data,
signature: await sha256(JSON.stringify(data) + "##" + sessionId),
})
).toString("base64");
}
async function getOIDCClient(settings: OIDCSettings): Promise<BaseClient> {
const issuer = await Issuer.discover(OPENID_PROVIDER_URL);
return new issuer.Client({
client_id: OPENID_CLIENT_ID,
client_secret: OPENID_CLIENT_SECRET,
redirect_uris: [settings.redirectURI],
response_types: ["code"],
});
}
export async function getOIDCAuthorizationUrl(
settings: OIDCSettings,
params: { sessionId: string }
): Promise<string> {
const client = await getOIDCClient(settings);
const csrfToken = await generateCsrfToken(params.sessionId, settings.redirectURI);
const url = client.authorizationUrl({
scope: OPENID_SCOPES,
state: csrfToken,
});
return url;
}
export async function getOIDCUserData(settings: OIDCSettings, code: string): Promise<OIDCUserInfo> {
const client = await getOIDCClient(settings);
const token = await client.callback(settings.redirectURI, { code });
const userData = await client.userinfo(token);
return { token, userData };
}
export async function validateAndParseCsrfToken(
token: string,
sessionId: string
): Promise<{
/** This is the redirect url that was passed to the OIDC provider */
redirectUrl: string;
} | null> {
try {
const { data, signature } = z
.object({
data: z.object({
expiration: z.number().int(),
redirectUrl: z.string().url(),
}),
signature: z.string().length(64),
})
.parse(JSON.parse(token));
const reconstructSign = await sha256(JSON.stringify(data) + "##" + sessionId);
if (data.expiration > Date.now() && signature === reconstructSign) {
return { redirectUrl: data.redirectUrl };
}
} catch (e) {
console.error(e);
}
return null;
}
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/server/models.ts | import { HF_ACCESS_TOKEN, MODELS, OLD_MODELS } from "$env/static/private";
import { z } from "zod";
const modelsRaw = z
.array(
z.object({
/** Used as an identifier in DB */
id: z.string().optional(),
/** Used to link to the model page, and for inference */
name: z.string().min(1),
displayName: z.string().min(1).optional(),
description: z.string().min(1).optional(),
websiteUrl: z.string().url().optional(),
modelUrl: z.string().url().optional(),
datasetName: z.string().min(1).optional(),
datasetUrl: z.string().url().optional(),
userMessageToken: z.string().min(1),
assistantMessageToken: z.string().min(1),
messageEndToken: z.string().min(1).optional(),
preprompt: z.string().default(""),
prepromptUrl: z.string().url().optional(),
promptExamples: z
.array(
z.object({
title: z.string().min(1),
prompt: z.string().min(1),
})
)
.optional(),
endpoints: z
.array(
z.object({
url: z.string().url(),
authorization: z.string().min(1).default(`Bearer ${HF_ACCESS_TOKEN}`),
weight: z.number().int().positive().default(1),
})
)
.optional(),
parameters: z
.object({
temperature: z.number().min(0).max(1),
truncate: z.number().int().positive(),
max_new_tokens: z.number().int().positive(),
stop: z.array(z.string()).optional(),
})
.passthrough()
.optional(),
})
)
.parse(JSON.parse(MODELS));
export const models = await Promise.all(
modelsRaw.map(async (m) => ({
...m,
id: m.id || m.name,
displayName: m.displayName || m.name,
preprompt: m.prepromptUrl ? await fetch(m.prepromptUrl).then((r) => r.text()) : m.preprompt,
}))
);
// Models that have been deprecated
export const oldModels = OLD_MODELS
? z
.array(
z.object({
id: z.string().optional(),
name: z.string().min(1),
displayName: z.string().min(1).optional(),
})
)
.parse(JSON.parse(OLD_MODELS))
.map((m) => ({ ...m, id: m.id || m.name, displayName: m.displayName || m.name }))
: [];
export type BackendModel = (typeof models)[0];
export const defaultModel = models[0];
export const validateModel = (_models: BackendModel[]) => {
// Zod enum function requires 2 parameters
return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]);
};
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/server/modelEndpoint.ts | import { HF_ACCESS_TOKEN, HF_API_ROOT } from "$env/static/private";
import { sum } from "$lib/utils/sum";
import type { BackendModel } from "./models";
/**
* Find a random load-balanced endpoint
*/
export function modelEndpoint(model: BackendModel): {
url: string;
authorization: string;
weight: number;
} {
if (!model.endpoints) {
return {
url: `${HF_API_ROOT}/${model.name}`,
authorization: `Bearer ${HF_ACCESS_TOKEN}`,
weight: 1,
};
}
const endpoints = model.endpoints;
const totalWeight = sum(endpoints.map((e) => e.weight));
let random = Math.random() * totalWeight;
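	// Weighted random pick: subtract each endpoint's weight from the draw
	// until the draw falls within an endpoint's weight bucket.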
for (const endpoint of endpoints) {
if (random < endpoint.weight) {
return endpoint;
}
random -= endpoint.weight;
}
throw new Error("Invalid config, no endpoint found");
}
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/server/generateFromDefaultEndpoint.ts | import { defaultModel } from "$lib/server/models";
import { modelEndpoint } from "./modelEndpoint";
import { textGeneration } from "@huggingface/inference";
import { trimSuffix } from "$lib/utils/trimSuffix";
import { trimPrefix } from "$lib/utils/trimPrefix";
import { PUBLIC_SEP_TOKEN } from "$lib/constants/publicSepToken";
interface Parameters {
temperature: number;
truncate: number;
max_new_tokens: number;
stop: string[];
}
export async function generateFromDefaultEndpoint(
prompt: string,
parameters?: Partial<Parameters>
) {
const newParameters = {
...defaultModel.parameters,
...parameters,
return_full_text: false,
};
const endpoint = modelEndpoint(defaultModel);
let { generated_text } = await textGeneration(
{
model: endpoint.url,
inputs: prompt,
parameters: newParameters,
},
{
fetch: (url, options) =>
fetch(url, {
...options,
headers: { ...options?.headers, Authorization: endpoint.authorization },
}),
}
);
generated_text = trimSuffix(trimPrefix(generated_text, "<|startoftext|>"), PUBLIC_SEP_TOKEN);
return generated_text;
}
| 0 |
hf_public_repos/chat-ui/src/lib | hf_public_repos/chat-ui/src/lib/server/database.ts | import { MONGODB_URL, MONGODB_DB_NAME, MONGODB_DIRECT_CONNECTION } from "$env/static/private";
import { MongoClient } from "mongodb";
import type { Conversation } from "$lib/types/Conversation";
import type { SharedConversation } from "$lib/types/SharedConversation";
import type { WebSearch } from "$lib/types/WebSearch";
import type { AbortedGeneration } from "$lib/types/AbortedGeneration";
import type { Settings } from "$lib/types/Settings";
import type { User } from "$lib/types/User";
import type { MessageEvent } from "$lib/types/MessageEvent";
if (!MONGODB_URL) {
throw new Error(
"Please specify the MONGODB_URL environment variable inside .env.local. Set it to mongodb://localhost:27017 if you are running MongoDB locally, or to a MongoDB Atlas free instance for example."
);
}
const client = new MongoClient(MONGODB_URL, {
directConnection: MONGODB_DIRECT_CONNECTION === "true",
});
export const connectPromise = client.connect().catch(console.error);
const db = client.db(MONGODB_DB_NAME + (import.meta.env.MODE === "test" ? "-test" : ""));
const conversations = db.collection<Conversation>("conversations");
const sharedConversations = db.collection<SharedConversation>("sharedConversations");
const abortedGenerations = db.collection<AbortedGeneration>("abortedGenerations");
const settings = db.collection<Settings>("settings");
const users = db.collection<User>("users");
const webSearches = db.collection<WebSearch>("webSearches");
const messageEvents = db.collection<MessageEvent>("messageEvents");
export { client, db };
export const collections = {
conversations,
sharedConversations,
abortedGenerations,
settings,
users,
webSearches,
messageEvents,
};
client.on("open", () => {
conversations
.createIndex(
{ sessionId: 1, updatedAt: -1 },
{ partialFilterExpression: { sessionId: { $exists: true } } }
)
.catch(console.error);
conversations
.createIndex(
{ userId: 1, updatedAt: -1 },
{ partialFilterExpression: { userId: { $exists: true } } }
)
.catch(console.error);
webSearches.createIndex({ sessionId: 1, updatedAt: -1 }).catch(console.error);
abortedGenerations.createIndex({ updatedAt: 1 }, { expireAfterSeconds: 30 }).catch(console.error);
abortedGenerations.createIndex({ conversationId: 1 }, { unique: true }).catch(console.error);
sharedConversations.createIndex({ hash: 1 }, { unique: true }).catch(console.error);
settings.createIndex({ sessionId: 1 }, { unique: true, sparse: true }).catch(console.error);
settings.createIndex({ userId: 1 }, { unique: true, sparse: true }).catch(console.error);
users.createIndex({ hfUserId: 1 }, { unique: true }).catch(console.error);
users.createIndex({ sessionId: 1 }, { unique: true, sparse: true }).catch(console.error);
messageEvents.createIndex({ createdAt: 1 }, { expireAfterSeconds: 60 }).catch(console.error);
});
| 0 |
hf_public_repos/chat-ui/src/lib/server | hf_public_repos/chat-ui/src/lib/server/websearch/summarizeWeb.ts | import { HF_ACCESS_TOKEN } from "$env/static/private";
import { HfInference } from "@huggingface/inference";
import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint";
import type { BackendModel } from "../models";
export async function summarizeWeb(content: string, query: string, model: BackendModel) {
	// if HF_ACCESS_TOKEN is set, we summarize via the HF Inference API with a dedicated summarization model
try {
if (HF_ACCESS_TOKEN) {
const summary = (
await new HfInference(HF_ACCESS_TOKEN).summarization({
model: "facebook/bart-large-cnn",
inputs: content,
parameters: {
max_length: 512,
},
})
).summary_text;
return summary;
}
} catch (e) {
console.log(e);
}
// else we use the LLM to generate a summary
const summaryPrompt =
model.userMessageToken +
content
.split(" ")
.slice(0, model.parameters?.truncate ?? 0)
.join(" ") +
model.messageEndToken +
model.userMessageToken +
`The text above should be summarized to best answer the query: ${query}.` +
model.messageEndToken +
model.assistantMessageToken +
"Summary: ";
const summary = await generateFromDefaultEndpoint(summaryPrompt).then((txt: string) =>
txt.trim()
);
return summary;
}
| 0 |
hf_public_repos/chat-ui/src/lib/server | hf_public_repos/chat-ui/src/lib/server/websearch/searchWeb.ts | import { SERPAPI_KEY, SERPER_API_KEY } from "$env/static/private";
import { getJson } from "serpapi";
import type { GoogleParameters } from "serpapi";
// Search the web with Serper.dev if a key is configured, otherwise fall back to SerpAPI
export async function searchWeb(query: string) {
if (SERPER_API_KEY) {
return await searchWebSerper(query);
}
if (SERPAPI_KEY) {
return await searchWebSerpApi(query);
}
throw new Error("No Serper.dev or SerpAPI key found");
}
export async function searchWebSerper(query: string) {
const params = {
q: query,
hl: "en",
gl: "us",
};
const response = await fetch("https://google.serper.dev/search", {
method: "POST",
body: JSON.stringify(params),
headers: {
"x-api-key": SERPER_API_KEY,
"Content-type": "application/json; charset=UTF-8",
},
});
/* eslint-disable @typescript-eslint/no-explicit-any */
const data = (await response.json()) as Record<string, any>;
if (!response.ok) {
throw new Error(
data["message"] ??
`Serper API returned error code ${response.status} - ${response.statusText}`
);
}
return {
organic_results: data["organic"] ?? [],
knowledge_graph: data["knowledgeGraph"] ?? null,
answer_box: data["answerBox"] ?? null,
};
}
export async function searchWebSerpApi(query: string) {
const params = {
q: query,
hl: "en",
gl: "us",
google_domain: "google.com",
api_key: SERPAPI_KEY,
} satisfies GoogleParameters;
// Show result as JSON
const response = await getJson("google", params);
return response;
}
| 0 |
hf_public_repos/chat-ui/src/lib/server | hf_public_repos/chat-ui/src/lib/server/websearch/parseWeb.ts | import { JSDOM, VirtualConsole } from "jsdom";
function removeTags(node: Node) {
	if (node.hasChildNodes()) {
		// Iterate over a static copy: removing nodes while iterating a live NodeList skips elements.
		// Check the child's tag (not the parent's) so whole <script>/<style> elements are dropped.
		for (const childNode of [...node.childNodes]) {
			if (childNode.nodeName === "SCRIPT" || childNode.nodeName === "STYLE") {
				node.removeChild(childNode);
			} else {
				removeTags(childNode);
			}
		}
	}
}
function naiveInnerText(node: Node): string {
const Node = node; // We need Node(DOM's Node) for the constants, but Node doesn't exist in the nodejs global space, and any Node instance references the constants through the prototype chain
return [...node.childNodes]
.map((childNode) => {
switch (childNode.nodeType) {
case Node.TEXT_NODE:
				return childNode.textContent; // the text node's own content; `node.textContent` would duplicate the whole subtree
case Node.ELEMENT_NODE:
return naiveInnerText(childNode);
default:
return "";
}
})
.join("\n");
}
export async function parseWeb(url: string) {
const abortController = new AbortController();
setTimeout(() => abortController.abort(), 10000);
const htmlString = await fetch(url, { signal: abortController.signal })
.then((response) => response.text())
.catch((err) => console.log(err));
const virtualConsole = new VirtualConsole();
virtualConsole.on("error", () => {
// No-op to skip console errors.
});
// put the html string into a DOM
const dom = new JSDOM(htmlString ?? "", {
virtualConsole,
});
const body = dom.window.document.querySelector("body");
if (!body) throw new Error("body of the webpage is null");
removeTags(body);
// recursively extract text content from the body and then remove newlines and multiple spaces
const text = (naiveInnerText(body) ?? "").replace(/ {2}|\r\n|\n|\r/gm, "");
return text;
}
| 0 |
hf_public_repos/chat-ui/src/lib/server | hf_public_repos/chat-ui/src/lib/server/websearch/generateQuery.ts | import type { Message } from "$lib/types/Message";
import { generateFromDefaultEndpoint } from "../generateFromDefaultEndpoint";
import type { BackendModel } from "../models";
export async function generateQuery(messages: Message[], model: BackendModel) {
const promptSearchQuery =
model.userMessageToken +
"The following messages were written by a user, trying to answer a question." +
model.messageEndToken +
	messages
		.filter((message) => message.from === "user")
		.map((message) => model.userMessageToken + message.content + model.messageEndToken)
		// Join explicitly: implicit array-to-string coercion would insert commas between messages.
		.join("") +
model.userMessageToken +
"What plain-text english sentence would you input into Google to answer the last question? Answer with a short (10 words max) simple sentence." +
model.messageEndToken +
model.assistantMessageToken +
"Query: ";
const searchQuery = await generateFromDefaultEndpoint(promptSearchQuery).then((query) => {
const arr = query.split(/\r?\n/);
return arr[0].length > 0 ? arr[0] : arr[1];
});
return searchQuery;
}
| 0 |
hf_public_repos | hf_public_repos/tokenizers/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
<a href="https://github.com/huggingface/tokenizers/blob/main/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue&cachedrop">
</a>
<a href="https://pepy.tech/project/tokenizers">
<img src="https://pepy.tech/badge/tokenizers/week" />
</a>
</p>
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
## Main features:
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignments tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
## Bindings
We provide bindings to the following languages (more to come!):
- [Rust](https://github.com/huggingface/tokenizers/tree/main/tokenizers) (Original implementation)
- [Python](https://github.com/huggingface/tokenizers/tree/main/bindings/python)
- [Node.js](https://github.com/huggingface/tokenizers/tree/main/bindings/node)
- [Ruby](https://github.com/ankane/tokenizers-ruby) (Contributed by @ankane, external repo)
## Quick example using Python:
Choose your model between Byte-Pair Encoding, WordPiece or Unigram and instantiate a tokenizer:
```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
tokenizer = Tokenizer(BPE())
```
You can customize how pre-tokenization (e.g., splitting into words) is done:
```python
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
```
Then training your tokenizer on a set of files just takes two lines of code:
```python
from tokenizers.trainers import BpeTrainer
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.train(files=["wiki.train.raw", "wiki.valid.raw", "wiki.test.raw"], trainer=trainer)
```
Once your tokenizer is trained, encode any text with just one line:
```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
```
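You can also get the token ids from the same `Encoding` (for example, to feed a model). The exact
values depend on the vocabulary you trained:

```python
print(output.ids)
# A list of ints, one id per token (values depend on the trained vocabulary)
```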
Check the [python documentation](https://huggingface.co/docs/tokenizers/index) or the
[python quicktour](https://huggingface.co/docs/tokenizers/python/latest/quicktour.html) to learn
more!
| 0 |
hf_public_repos | hf_public_repos/tokenizers/RELEASE.md | ## How to release
# Before the release
Simple checklist on how to make releases for `tokenizers`.
- Freeze `master` branch.
- Run all tests (Check CI has properly run)
- If any significant work, check benchmarks:
- `cd tokenizers && cargo bench` (needs to be run on latest release tag to measure difference if it's your first time)
- Run all `transformers` tests. (`transformers` is a big user of `tokenizers`; we need
    to make sure we don't break it, and testing is one way to make sure nothing unforeseen
    has been done.)
- Run all fast tests at the VERY least (not just the tokenization tests). (`RUN_PIPELINE_TESTS=1 CUDA_VISIBLE_DEVICES=-1 pytest -sv tests/`)
- When all *fast* tests work, then we can also (it's recommended) run the whole `transformers`
test suite.
- Rebase this [PR](https://github.com/huggingface/transformers/pull/16708).
This will create new docker images ready to run the tests suites with `tokenizers` from the main branch.
- Wait for actions to finish
- Rebase this [PR](https://github.com/huggingface/transformers/pull/16712)
This will run the actual full test suite.
- Check the results.
- **If any breaking change has been done**, make sure the version can safely be increased for transformers users (the `tokenizers` version pin must ensure users don't upgrade before `transformers` has). [link](https://github.com/huggingface/transformers/blob/main/setup.py#L154)
For instance `tokenizers>=0.10,<0.11` so we can safely upgrade to `0.11` without impacting
current users
- Then start a new PR containing all desired code changes from the following steps.
- You will `Create release` after the code modifications are on `master`.
# Rust
- `tokenizers` (rust, python & node) versions don't have to be in sync but it's
very common to release for all versions at once for new features.
- Edit `Cargo.toml` to reflect new version
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (python PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
 the new version to `crates.io`; there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since the package was never properly uploaded to the registry, you can safely try again.
# Python
- Edit `bindings/python/setup.py` to reflect new version.
- Edit `bindings/python/py_src/tokenizers/__init__.py` to reflect new version.
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (node PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `python-vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
 the new version to `pypi`; there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since the package was never properly uploaded to the registry, you can safely try again.
- This CI/CD has 3 distinct builds: `Pypi` (normal), `conda` and `extra`. `Extra` is REALLY slow (~4h); this is normal since it has to rebuild many things, but it enables the wheel to be available on old Linux distributions
# Node
- Edit `bindings/node/package.json` to reflect new version.
- Edit `CHANGELOG.md`:
- Add relevant PRs that were added (python PRs do not belong for instance).
- Add links at the end of the files.
- Go to [Releases](https://github.com/huggingface/tokenizers/releases)
- Create new Release:
- Mark it as pre-release
- Use new version name with a new tag (create on publish) `node-vX.X.X`.
- Copy paste the new part of the `CHANGELOG.md`
- ⚠️ Click on `Publish release`. This will start the whole process of building and uploading
 the new version to `npm`; there's no going back after this
- Go to the [Actions](https://github.com/huggingface/tokenizers/actions) tab and check everything works smoothly.
- If anything fails, you need to fix the CI/CD to make it work again. Since the package was never properly uploaded to the registry, you can safely try again.
# Testing the CI/CD for release
If you want to make modifications to the CI/CD of the release GH actions, you need
to :
- **Comment out the part that uploads the artifacts** to `crates.io`, `PyPi` or `npm`.
- Change the trigger mechanism so it can trigger every time you push to your branch.
- Keep pushing your changes until the artifacts are properly created.
| 0 |
hf_public_repos | hf_public_repos/tokenizers/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
hf_public_repos | hf_public_repos/tokenizers/CITATION.cff | # This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!
cff-version: 1.2.0
title: HuggingFace's Tokenizers
message: >-
Fast State-of-the-Art Tokenizers optimized for Research
and Production.
type: software
authors:
- given-names: Anthony
family-names: Moi
email: [email protected]
affiliation: HuggingFace
- given-names: Nicolas
family-names: Patry
affiliation: HuggingFace
repository-code: 'https://github.com/huggingface/tokenizers'
url: 'https://github.com/huggingface/tokenizers'
repository: 'https://huggingface.co'
abstract: >-
Fast State-of-the-Art Tokenizers optimized for Research
and Production.
keywords:
- Rust
- Tokenizer
- NLP
license: Apache-2.0
commit: 37372b6
version: 0.13.3
date-released: '2023-04-05'
| 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/docs/README.md | ## Requirements
In order to generate the documentation, it is necessary to have a Python environment with the
following:
```bash
pip install sphinx sphinx_rtd_theme setuptools_rust
```
It is also necessary to have the `tokenizers` library in this same environment, for Sphinx to
generate all the API Reference and links properly. If you want to visualize the documentation with
some modifications made to the Python bindings, make sure you build it from source.
## Building the documentation
Once everything is set up, you can build the documentation automatically for all the languages
using the following command in the `/docs` folder:
```bash
make html_all
```
If you want to build only for a specific language, you can use:
```bash
make html O="-t python"
```
(Replace `python` with the target language: `rust`, `node`, or `python`)
**NOTE**
If you are making any structural change to the documentation, it is recommended to clean the build
directory before rebuilding:
```bash
make clean && make html_all
```
| 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/docs/Makefile | # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for those with `?=`
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
BUILDDIR ?= build
SOURCEDIR = source
# Put it first so that "make" without argument is like "make html_all".
html_all:
@echo "Generating doc for Rust"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/rust" $(SPHINXOPTS) $(O) -t rust
@echo "Generating doc for Python"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/python" $(SPHINXOPTS) $(O) -t python
@echo "Generating doc for Node.js"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/node" $(SPHINXOPTS) $(O) -t node
.PHONY: html_all Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("./_ext"))
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "tokenizers"
copyright = "2020, huggingface"
author = "huggingface"
# The full version, including alpha/beta/rc tags
release = ""
# -- Custom information ------------------------------------------------------
# The possible values for languages (used by `_ext/entities`)
languages = ["node", "rust", "python"]
# This defines the version used to generate links to docs.rs
rust_version = "latest"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "entities", "rust_doc", "toctree_tags"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"analytics_id": "UA-83738774-2"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
def setup(app):
for language in languages:
if not tags.has(language):
exclude_patterns.append(f"tutorials/{language}/*")
app.add_css_file("css/huggingface.css")
app.add_css_file("css/code-snippets.css")
app.add_js_file("js/custom.js")
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/pipeline.rst | The tokenization pipeline
====================================================================================================
When calling :entity:`Tokenizer.encode` or :entity:`Tokenizer.encode_batch`, the input text(s) go
through the following pipeline:
- :ref:`normalization`
- :ref:`pre-tokenization`
- :ref:`model`
- :ref:`post-processing`
We'll see in detail what happens during each of those steps, as well as when you want to
:ref:`decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you to customize
each of those steps to your needs. If you're already familiar with those steps and want to learn by
seeing some code, jump to :ref:`our BERT from scratch example <example>`.
For the examples that require a :entity:`Tokenizer`, we will use the tokenizer we trained
in the :doc:`quicktour`, which you can load with:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_reload_tokenizer
:end-before: END pipeline_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. _normalization:
Normalization
----------------------------------------------------------------------------------------------------
Normalization is, in a nutshell, a set of operations you apply to a raw string to make it less
random or "cleaner". Common operations include stripping whitespace, removing accented characters
or lowercasing all text. If you're familiar with `Unicode normalization
<https://unicode.org/reports/tr15>`__, it is also a very common normalization operation applied
in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library by a
:entity:`Normalizer`, and you can combine several of those by using a
:entity:`normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_normalizer
:end-before: END pipeline_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
You can manually test that normalizer by applying it to any string:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_normalizer
:end-before: END pipeline_test_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
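
.. only:: python

   For illustration, here is the same step written inline. This is a minimal
   sketch using the public API of the Python bindings; it mirrors the snippet
   included above rather than replacing it:

   .. code-block:: python

       from tokenizers import normalizers
       from tokenizers.normalizers import NFD, StripAccents

       normalizer = normalizers.Sequence([NFD(), StripAccents()])
       normalizer.normalize_str("Héllò hôw are ü?")
       # "Hello how are u?"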
When building a :entity:`Tokenizer`, you can customize its normalizer by just changing
the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_normalizer
:end-before: END pipeline_replace_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
Of course, if you change the way a tokenizer applies normalization, you should probably retrain it
from scratch afterward.
.. _pre-tokenization:
Pre-Tokenization
----------------------------------------------------------------------------------------------------
Pre-tokenization is the act of splitting a text into smaller objects that give an upper bound to
what your tokens will be at the end of training. A good way to think of this is that the
pre-tokenizer will split your text into "words" and then, your final tokens will be parts of those
words.
An easy way to pre-tokenize inputs is to split on spaces and punctuations, which is done by the
:entity:`pre_tokenizers.Whitespace` pre-tokenizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_pre_tokenizer
:end-before: END pipeline_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
The output is a list of tuples, with each tuple containing one word and its span in the original
sentence (which is used to determine the final :obj:`offsets` of our :entity:`Encoding`).
Note that splitting on punctuation will split contractions like :obj:`"I'm"` in this example.
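
.. only:: python

   As an inline sketch of this step (Python bindings; output abbreviated):

   .. code-block:: python

       from tokenizers.pre_tokenizers import Whitespace

       pre_tokenizer = Whitespace()
       pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.")
       # [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ...]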
You can combine any number of :entity:`PreTokenizer` components together. For
instance, here is a pre-tokenizer that will split on space, punctuation and digits, separating
numbers into their individual digits:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_combine_pre_tokenizer
:end-before: END pipeline_combine_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
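
.. only:: python

   A minimal inline version of this combined pre-tokenizer (a sketch using the
   Python bindings):

   .. code-block:: python

       from tokenizers import pre_tokenizers
       from tokenizers.pre_tokenizers import Whitespace, Digits

       pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
       pre_tokenizer.pre_tokenize_str("Call 911!")
       # [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))]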
As we saw in the :doc:`quicktour`, you can customize the pre-tokenizer of a
:entity:`Tokenizer` by just changing the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_pre_tokenizer
:end-before: END pipeline_replace_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
Of course, if you change the pre-tokenizer, you should probably retrain your tokenizer from
scratch afterward.
.. _model:
The Model
----------------------------------------------------------------------------------------------------
Once the input texts are normalized and pre-tokenized, the :entity:`Tokenizer` applies the model on
the pre-tokens. This is the part of the pipeline that needs training on your corpus (or that has
been trained if you are using a pretrained tokenizer).
The role of the model is to split your "words" into tokens, using the rules it has learned. It's
also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the :entity:`Tokenizer` so you already know
how to customize this part. Currently, the 🤗 Tokenizers library supports:
- :entity:`models.BPE`
- :entity:`models.Unigram`
- :entity:`models.WordLevel`
- :entity:`models.WordPiece`
For more details about each model and its behavior, you can check `here <components#models>`__
.. _post-processing:
Post-Processing
----------------------------------------------------------------------------------------------------
Post-processing is the last step of the tokenization pipeline, to perform any additional
transformation to the :entity:`Encoding` before it's returned, like adding potential
special tokens.
As we saw in the quick tour, we can customize the post processor of a :entity:`Tokenizer`
by setting the corresponding attribute. For instance, here is how we can post-process to make the
inputs suitable for the BERT model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_processor
:end-before: END pipeline_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
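
.. only:: python

   Inline, the BERT-style template looks like this (a sketch; the special
   token ids assume ``"[CLS]"`` and ``"[SEP]"`` sit at ids 1 and 2 of the
   vocabulary):

   .. code-block:: python

       from tokenizers.processors import TemplateProcessing

       tokenizer.post_processor = TemplateProcessing(
           single="[CLS] $A [SEP]",
           pair="[CLS] $A [SEP] $B:1 [SEP]:1",
           special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
       )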
Note that, contrary to the pre-tokenizer or the normalizer, you don't need to retrain a tokenizer
after changing its post-processor.
.. _example:
All together: a BERT tokenizer from scratch
----------------------------------------------------------------------------------------------------
Let's put all those pieces together to build a BERT tokenizer. First, BERT relies on WordPiece, so
we instantiate a new :entity:`Tokenizer` with this model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
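
.. only:: python

   Which, written inline, amounts to (a minimal sketch):

   .. code-block:: python

       from tokenizers import Tokenizer
       from tokenizers.models import WordPiece

       bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))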
Then we know that BERT preprocesses texts by removing accents and lowercasing. We also use a
Unicode normalizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
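
.. only:: python

   For reference, an inline sketch of this normalizer:

   .. code-block:: python

       from tokenizers import normalizers
       from tokenizers.normalizers import NFD, Lowercase, StripAccents

       bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])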
The pre-tokenizer is just splitting on whitespace and punctuation:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
And the post-processing uses the template we saw in the previous section:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
We can use this tokenizer and train it on wikitext like in the :doc:`quicktour`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
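
.. only:: python

   A condensed sketch of the training call (the wikitext file paths below are
   the ones used in the quicktour and are only an assumption here):

   .. code-block:: python

       from tokenizers.trainers import WordPieceTrainer

       trainer = WordPieceTrainer(
           vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
       )
       files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
       bert_tokenizer.train(files, trainer)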
.. _decoding:
Decoding
----------------------------------------------------------------------------------------------------
.. entities:: python
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: rust
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: node
bert_tokenizer
:obj:`bertTokenizer`
On top of encoding the input texts, a :entity:`Tokenizer` also has an API for decoding,
that is converting IDs generated by your model back to a text. This is done by the methods
:entity:`Tokenizer.decode` (for one predicted text) and :entity:`Tokenizer.decode_batch` (for a
batch of predictions).
The `decoder` will first convert the IDs back to tokens (using the tokenizer's vocabulary) and
remove all special tokens, then join those tokens with spaces:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_decoding
:end-before: END pipeline_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
If you used a model that added special characters to represent subtokens of a given "word" (like
the :obj:`"##"` in WordPiece), you will need to customize the `decoder` to treat them properly. If we
take our previous :entity:`bert_tokenizer` for instance, the default decoding will give:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
But by changing it to a proper decoder, we get:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
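
.. only:: python

    As a condensed sketch of the two snippets above in the Python bindings (assuming
    the :obj:`bert_tokenizer` trained earlier; the exact tokens depend on your
    vocabulary):

    .. code-block:: python

        from tokenizers import decoders

        output = bert_tokenizer.encode("Welcome to the Tokenizers library.")
        # Default decoding joins tokens with spaces, leaving the "##" markers in
        print(bert_tokenizer.decode(output.ids))
        # e.g. "welcome to the tok ##eni ##zer ##s library ."

        # The WordPiece decoder merges "##" continuation tokens back into words
        bert_tokenizer.decoder = decoders.WordPiece()
        print(bert_tokenizer.decode(output.ids))
        # e.g. "welcome to the tokenizers library."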
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/components.rst | Components
====================================================================================================
When building a Tokenizer, you can attach various types of components to this Tokenizer in order
to customize its behavior. This page lists most of the provided components.
.. _normalizers:
.. entities:: python
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence([NFKC(), Lowercase()])``
PreTokenizer.Sequence
``Sequence([Punctuation(), WhitespaceSplit()])``
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`merged_with_previous`
SplitDelimiterBehavior.merged_with_next
:obj:`merged_with_next`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
.. entities:: rust
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence::new(vec![NFKC, Lowercase])``
PreTokenizer.Sequence
``Sequence::new(vec![Punctuation, WhitespaceSplit])``
SplitDelimiterBehavior.removed
:obj:`Removed`
SplitDelimiterBehavior.isolated
:obj:`Isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`MergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`MergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`Contiguous`
.. entities:: node
BertNormalizer.clean_text
cleanText
BertNormalizer.handle_chinese_chars
handleChineseChars
BertNormalizer.strip_accents
stripAccents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
..
PreTokenizer.Sequence
..
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`mergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`mergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
Normalizers
----------------------------------------------------------------------------------------------------
A ``Normalizer`` is in charge of pre-processing the input string in order to normalize it as
relevant for a given use case. Some common examples of normalization are the Unicode normalization
algorithms (NFD, NFKD, NFC & NFKC), lowercasing, etc.
The specificity of ``tokenizers`` is that we keep track of the alignment while normalizing. This
is essential to allow mapping from the generated tokens back to the input text.
The ``Normalizer`` is optional.
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - NFD
- NFD unicode normalization
-
* - NFKD
- NFKD unicode normalization
-
* - NFC
- NFC unicode normalization
-
* - NFKC
- NFKC unicode normalization
-
* - Lowercase
- Replaces all uppercase characters with lowercase
- Input: ``HELLO ὈΔΥΣΣΕΎΣ``
Output: ``hello ὀδυσσεύς``
* - Strip
- Removes all whitespace characters on the specified sides (left, right or both) of the input
- Input: ``" hi "``
Output: ``"hi"``
* - StripAccents
- Removes all accent symbols in unicode (to be used with NFD for consistency)
- Input: ``é``
Output: ``e``
* - Replace
- Replaces a custom string or regexp with the given content
- ``Replace("a", "e")`` will behave like this:
Input: ``"banana"``
Output: ``"benene"``
* - BertNormalizer
- Provides an implementation of the Normalizer used in the original BERT. Options
that can be set are:
- :entity:`BertNormalizer.clean_text`
- :entity:`BertNormalizer.handle_chinese_chars`
- :entity:`BertNormalizer.strip_accents`
- :entity:`BertNormalizer.lowercase`
-
* - Sequence
- Composes multiple normalizers that will run in the provided order
- :entity:`Normalizer.Sequence`
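
.. only:: python

    As a quick illustrative sketch with the Python bindings (the output shown is
    indicative):

    .. code-block:: python

        from tokenizers import normalizers
        from tokenizers.normalizers import NFD, StripAccents, Lowercase

        # Decompose characters (NFD), drop the accent marks, then lowercase
        normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])
        print(normalizer.normalize_str("Héllò hôw are ü?"))
        # "hello how are u?"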
.. _pre-tokenizers:
Pre tokenizers
----------------------------------------------------------------------------------------------------
The ``PreTokenizer`` takes care of splitting the input according to a set of rules. This
pre-processing lets you ensure that the underlying ``Model`` does not build tokens across multiple
"splits".
For example if you don't want to have whitespaces inside a token, then you can have a
``PreTokenizer`` that splits on these whitespaces.
You can easily combine multiple ``PreTokenizer`` together using a ``Sequence`` (see below).
The ``PreTokenizer`` is also allowed to modify the string, just like a ``Normalizer`` does. This
is necessary to allow some complicated algorithms that require splitting before normalizing (e.g.
the ByteLevel one).
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - ByteLevel
- Splits on whitespaces while remapping all the bytes to a set of visible characters. This
  technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties:
- Since it maps on bytes, a tokenizer using this only requires **256** characters as initial
alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode
characters.
- A consequence of the previous point is that it is absolutely unnecessary to have an
  unknown token when using this, since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
- For non-ASCII characters, it gets completely unreadable, but it works nonetheless!
- Input: ``"Hello my friend, how are you?"``
Ouput: ``"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"``
* - Whitespace
- Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+``)
- Input: ``"Hello there!"``
Output: ``"Hello", "there", "!"``
* - WhitespaceSplit
- Splits on any whitespace character
- Input: ``"Hello there!"``
Output: ``"Hello", "there!"``
* - Punctuation
- Will isolate all punctuation characters
- Input: ``"Hello?"``
Output: ``"Hello", "?"``
* - Metaspace
- Splits on whitespaces and replaces them with a special char "▁" (U+2581)
- Input: ``"Hello there"``
Output: ``"Hello", "▁there"``
* - CharDelimiterSplit
- Splits on a given character
- Example with ``x``:
Input: ``"Helloxthere"``
Output: ``"Hello", "there"``
* - Digits
- Splits the numbers from any other characters.
- Input: ``"Hello123there"``
Output: ``"Hello", "123", "there"``
* - Split
- Versatile pre-tokenizer that splits on the provided pattern and according to the provided behavior.
The pattern can be inverted if necessary.
- pattern should be either a custom string or regexp.
- behavior should be one of:
* :entity:`SplitDelimiterBehavior.removed`
* :entity:`SplitDelimiterBehavior.isolated`
* :entity:`SplitDelimiterBehavior.merged_with_previous`
* :entity:`SplitDelimiterBehavior.merged_with_next`
* :entity:`SplitDelimiterBehavior.contiguous`
- invert should be a boolean flag.
- Example with `pattern` = :obj:`" "`, `behavior` = :obj:`"isolated"`, `invert` = :obj:`False`:
Input: ``"Hello, how are you?"``
Output: ``"Hello,", " ", "how", " ", "are", " ", "you?"``
* - Sequence
- Lets you compose multiple ``PreTokenizer`` that will be run in the given order
- :entity:`PreTokenizer.Sequence`
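
.. only:: python

    As a sketch of what pre-tokenization returns in the Python bindings (each token
    comes with its offsets in the original string):

    .. code-block:: python

        from tokenizers.pre_tokenizers import Whitespace

        pre_tokenizer = Whitespace()
        print(pre_tokenizer.pre_tokenize_str("Hello! How are you?"))
        # [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)),
        #  ("are", (11, 14)), ("you", (15, 18)), ("?", (18, 19))]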
.. _models:
Models
----------------------------------------------------------------------------------------------------
Models are the core algorithms used to actually tokenize, and therefore, they are the only mandatory
component of a Tokenizer.
.. list-table::
:header-rows: 1
* - Name
- Description
* - WordLevel
- This is the "classic" tokenization algorithm. It lets you simply map words to IDs
without anything fancy. This has the advantage of being really simple to use and
understand, but it requires extremely large vocabularies for a good coverage.
*Using this* ``Model`` *requires the use of a* ``PreTokenizer``. *No choice will be made by
this model directly, it simply maps input tokens to IDs*
* - BPE
- One of the most popular subword tokenization algorithms. The Byte-Pair-Encoding works by
starting with characters, while merging those that are the most frequently seen together,
thus creating new tokens. It then works iteratively to build new tokens out of the most
frequent pairs it sees in a corpus.
BPE is able to build words it has never seen by using multiple subword tokens, and thus
requires smaller vocabularies, with less chances of having "unk" (unknown) tokens.
* - WordPiece
- This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in
models like BERT. It uses a greedy algorithm that tries to build long words first, splitting
into multiple tokens when entire words don't exist in the vocabulary. This is different from
BPE, which starts from characters and builds bigger tokens whenever possible.
It uses the famous ``##`` prefix to identify tokens that are part of a word (i.e. not starting
a word).
* - Unigram
- Unigram is also a subword tokenization algorithm, and works by trying to identify the best
set of subword tokens to maximize the probability for a given sentence. This is different
from BPE in that it is not deterministic based on a set of rules applied
sequentially. Instead, Unigram can compute multiple ways of tokenizing and
choose the most probable one.
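
.. only:: python

    The model is what you hand to the :entity:`Tokenizer` constructor; a minimal
    sketch (the :obj:`"[UNK]"` token here is an assumption to adapt to your setup):

    .. code-block:: python

        from tokenizers import Tokenizer
        from tokenizers.models import BPE, WordPiece

        bpe_tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
        wordpiece_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))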
.. _post-processors:
PostProcessor
----------------------------------------------------------------------------------------------------
After the whole pipeline, we sometimes want to insert some special tokens before feeding
a tokenized string into a model, like "[CLS] My horse is amazing [SEP]". The ``PostProcessor``
is the component doing just that.
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - TemplateProcessing
- Lets you easily template the post-processing, adding special tokens, and specifying
the ``type_id`` for each sequence/special token. The template is given two strings
representing the single sequence and the pair of sequences, as well as a set of
special tokens to use.
- Example, when specifying a template with these values:
- single: ``"[CLS] $A [SEP]"``
- pair: ``"[CLS] $A [SEP] $B [SEP]"``
- special tokens:
- ``"[CLS]"``
- ``"[SEP]"``
Input: ``("I like this", "but not this")``
Output: ``"[CLS] I like this [SEP] but not this [SEP]"``
.. _decoders:
Decoders
----------------------------------------------------------------------------------------------------
The Decoder knows how to go from the IDs used by the Tokenizer back to a readable piece of text.
Some ``Normalizer`` and ``PreTokenizer`` implementations, for example, use special characters or
identifiers that need to be reverted.
.. list-table::
:header-rows: 1
* - Name
- Description
* - ByteLevel
- Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using
a set of visible Unicode characters to represent each byte, so we need a Decoder to
revert this process and get something readable again.
* - Metaspace
- Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to
identify whitespaces, and so this Decoder helps with decoding these.
* - WordPiece
- Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing
subwords, and so this Decoder helps with decoding these.
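
.. only:: python

    Attaching a decoder is a one-line assignment in the Python bindings (a sketch;
    pick the decoder matching your pre-tokenizer or model):

    .. code-block:: python

        from tokenizers import decoders

        tokenizer.decoder = decoders.ByteLevel()  # or decoders.Metaspace(), decoders.WordPiece(), ...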
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/quicktour.rst | Quicktour
====================================================================================================
Let's have a quick look at the 🤗 Tokenizers library features. The library provides an
implementation of today's most used tokenizers that is both easy to use and blazing fast.
.. only:: python
It can be used to instantiate a :ref:`pretrained tokenizer <pretrained>` but we will start our
quicktour by building one from scratch and see how we can train it.
Build a tokenizer from scratch
----------------------------------------------------------------------------------------------------
To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on `wikitext-103
<https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/>`__ (516M of
text) in just a few seconds. First things first, you will need to download this dataset and unzip it
with:
.. code-block:: bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
Training the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. entities:: python
BpeTrainer
:class:`~tokenizers.trainers.BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: rust
BpeTrainer
:rust_struct:`~tokenizers::models::bpe::BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: node
BpeTrainer
BpeTrainer
vocab_size
:obj:`vocabSize`
min_frequency
:obj:`minFrequency`
special_tokens
:obj:`specialTokens`
unk_token
:obj:`unkToken`
pad_token
:obj:`padToken`
In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information
about the different type of tokenizers, check out this `guide
<https://huggingface.co/docs/transformers/main/en/tokenizer_summary#summary-of-the-tokenizers>`__ in the 🤗 Transformers
documentation. Here, training the tokenizer means it will learn merge rules by:
- Start with all the characters present in the training corpus as tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached the size we want (see the
  toy sketch below).
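
.. only:: python

    To make the merge rule concrete, here is a toy, pure-Python sketch of a single
    BPE step (an illustration only, not the library's implementation):

    .. code-block:: python

        from collections import Counter

        corpus = ["hug", "pug", "pun", "bun", "hugs"]
        words = [list(w) for w in corpus]  # start from characters

        # Count adjacent pairs of tokens across the corpus
        pairs = Counter()
        for w in words:
            for a, b in zip(w, w[1:]):
                pairs[(a, b)] += 1

        print(pairs.most_common(1))  # [(('u', 'g'), 3)] -> merge "u" + "g" into "ug"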
The main API of the library is the :entity:`class` :entity:`Tokenizer`, here is how we instantiate
one with a BPE model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_tokenizer
:end-before: END quicktour_init_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 8
To train our tokenizer on the wikitext files, we will need to instantiate a `trainer`, in this case
a :entity:`BpeTrainer`
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_trainer
:end-before: END quicktour_init_trainer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 8
We can set the training arguments like :entity:`vocab_size` or :entity:`min_frequency` (here left at
their default values of 30,000 and 0) but the most important part is to give the
:entity:`special_tokens` we plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
.. note::
The order in which you write the special tokens list matters: here :obj:`"[UNK]"` will get the
ID 0, :obj:`"[CLS]"` will get the ID 1 and so forth.
We could train our tokenizer right now, but it wouldn't be optimal. Without a pre-tokenizer that
will split our inputs into words, we might get tokens that overlap several words: for instance we
could get an :obj:`"it is"` token since those two words often appear next to each other. Using a
pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting
on whitespace.
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_pretok
:end-before: END quicktour_init_pretok
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 8
Now, we can just call the :entity:`Tokenizer.train` method with any list of files we want
to use:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START train
:end-before: END train
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_train
:end-before: END quicktour_train
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START train
:end-before: END train
:dedent: 8
This should only take a few seconds to train our tokenizer on the full wikitext dataset!
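
.. only:: python

    Putting the pieces together, here is a condensed sketch of the whole training
    flow in Python (the file names assume the wikitext download from above):

    .. code-block:: python

        from tokenizers import Tokenizer
        from tokenizers.models import BPE
        from tokenizers.pre_tokenizers import Whitespace
        from tokenizers.trainers import BpeTrainer

        tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
        tokenizer.pre_tokenizer = Whitespace()

        trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
        files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
        tokenizer.train(files, trainer)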
To save the tokenizer in one file that contains all its configuration and vocabulary, just use the
:entity:`Tokenizer.save` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START save
:end-before: END save
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_save
:end-before: END quicktour_save
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START save
:end-before: END save
:dedent: 8
and you can reload your tokenizer from that file with the :entity:`Tokenizer.from_file`
:entity:`classmethod`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 12
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_reload_tokenizer
:end-before: END quicktour_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
Using the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Now that we have trained a tokenizer, we can use it on any text we want with the
:entity:`Tokenizer.encode` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode
:end-before: END encode
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode
:end-before: END quicktour_encode
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode
:end-before: END encode
:dedent: 8
This applies the full pipeline of the tokenizer to the text, returning an
:entity:`Encoding` object. To learn more about this pipeline, and how to apply (or
customize) parts of it, check out :doc:`this page <pipeline>`.
This :entity:`Encoding` object then has all the attributes you need for your deep
learning model (or other). The :obj:`tokens` attribute contains the segmentation of your text in
tokens:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_tokens
:end-before: END quicktour_print_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 8
Similarly, the :obj:`ids` attribute will contain the index of each of those tokens in the
tokenizer's vocabulary:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_ids
:end-before: END print_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_ids
:end-before: END quicktour_print_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_ids
:end-before: END print_ids
:dedent: 8
An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking,
meaning you can always get the part of your original sentence that corresponds to a given token.
Those are stored in the :obj:`offsets` attribute of our :entity:`Encoding` object. For
instance, let's assume we want to find back what caused the :obj:`"[UNK]"` token to appear,
which is the token at index 9 in the list. We can just ask for the offset at that index:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_offsets
:end-before: END quicktour_print_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 8
and those are the indices that correspond to the emoji in the original sentence:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_use_offsets
:end-before: END quicktour_use_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 8
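
.. only:: python

    Concretely, with the Python bindings this boils down to slicing the original
    string (the sentence and token index here are assumed to match the example
    above; adapt them to your input):

    .. code-block:: python

        sentence = "Hello, y'all! How are you 😁 ?"
        output = tokenizer.encode(sentence)

        start, end = output.offsets[9]
        print(sentence[start:end])  # the emoji that became "[UNK]"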
Post-processing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We might want our tokenizer to automatically add special tokens, like :obj:`"[CLS]"` or
:obj:`"[SEP]"`. To do this, we use a post-processor. :entity:`TemplateProcessing` is the
most commonly used, you just have to specify a template for the processing of single sentences and
pairs of sentences, along with the special tokens and their IDs.
When we built our tokenizer, we set :obj:`"[CLS]"` and :obj:`"[SEP]"` in positions 1 and 2 of our
list of special tokens, so this should be their IDs. To double-check, we can use the
:entity:`Tokenizer.token_to_id` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START check_sep
:end-before: END check_sep
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_check_sep
:end-before: END quicktour_check_sep
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START check_sep
:end-before: END check_sep
:dedent: 8
Here is how we can set the post-processing to give us the traditional BERT inputs:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_template_processing
:end-before: END quicktour_init_template_processing
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 8
Let's go over this snippet of code in more detail. First we specify the template for single
sentences: those should have the form :obj:`"[CLS] $A [SEP]"` where :obj:`$A` represents our
sentence.
Then, we specify the template for sentence pairs, which should have the form
:obj:`"[CLS] $A [SEP] $B [SEP]"` where :obj:`$A` represents the first sentence and :obj:`$B` the
second one. The :obj:`:1` added in the template represents the `type IDs` we want for each part of
our input: it defaults to 0 for everything (which is why we don't have :obj:`$A:0`) and here we set
it to 1 for the tokens of the second sentence and the last :obj:`"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary.
To check that this worked properly, let's try to encode the same sentence as before:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens
:end-before: END quicktour_print_special_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 8
To check the results on a pair of sentences, we just pass the two sentences to
:entity:`Tokenizer.encode`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens_pair
:end-before: END quicktour_print_special_tokens_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 8
You can then check that the type IDs attributed to each token are correct with
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_type_ids
:end-before: END quicktour_print_type_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 8
If you save your tokenizer with :entity:`Tokenizer.save`, the post-processor will be saved along.
Encoding multiple sentences in a batch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To get the full speed of the 🤗 Tokenizers library, it's best to process your texts in batches,
using the :entity:`Tokenizer.encode_batch` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch
:end-before: END quicktour_encode_batch
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 8
The output is then a list of :entity:`Encoding` objects like the ones we saw before. You
can process together as many texts as you like, as long as they fit in memory.
To process a batch of sentence pairs, pass two lists to the
:entity:`Tokenizer.encode_batch` method: the list of sentences A and the list of sentences
B:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch_pair
:end-before: END quicktour_encode_batch_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 8
When encoding multiple sentences, you can automatically pad the outputs to the longest sentence
present by using :entity:`Tokenizer.enable_padding`, with the :entity:`pad_token` and its ID
(which we can double-check with :entity:`Tokenizer.token_to_id` like before):
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_enable_padding
:end-before: END quicktour_enable_padding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 8
We can set the :obj:`direction` of the padding (defaults to the right) or a given :obj:`length` if
we want to pad every sample to that specific number (here we leave it unset to pad to the size of
the longest text).
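
.. only:: python

    For instance, a sketch of padding every sample to a fixed length instead (the
    :obj:`pad_id` of 3 is an assumption; double-check it with
    :entity:`Tokenizer.token_to_id`):

    .. code-block:: python

        tokenizer.enable_padding(pad_id=3, pad_token="[PAD]", length=12)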
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_batch_tokens
:end-before: END quicktour_print_batch_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 8
In this case, the `attention mask` generated by the tokenizer takes the padding into account:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_attention_mask
:end-before: END quicktour_print_attention_mask
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 8
.. _pretrained:
.. only:: python
Using a pretrained tokenizer
------------------------------------------------------------------------------------------------
You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is
available in the repository.
.. code-block:: python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
Importing a pretrained tokenizer from legacy vocabulary files
------------------------------------------------------------------------------------------------
You can also import a pretrained tokenizer directly, as long as you have its vocabulary file.
For instance, here is how to import the classic pretrained BERT tokenizer:
.. code-block:: python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
.. code-block:: bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/index.rst | Tokenizers
====================================================================================================
Fast, state-of-the-art tokenizers, optimized for both research and production
`🤗 Tokenizers`_ provides an implementation of today's most used tokenizers, with
a focus on performance and versatility. These tokenizers are also used in
`🤗 Transformers`_.
.. _🤗 Tokenizers: https://github.com/huggingface/tokenizers
.. _🤗 Transformers: https://github.com/huggingface/transformers
Main features:
----------------------------------------------------------------------------------------------------
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get
the part of the original sentence that corresponds to any token.
- Does all the pre-processing: truncation, padding, adding the special tokens your model needs.
.. toctree::
:maxdepth: 2
:caption: Getting Started
quicktour
installation/main
pipeline
components
.. toctree-tags::
:maxdepth: 3
:caption: Using 🤗 Tokenizers
:glob:
:python:tutorials/python/*
.. toctree::
:maxdepth: 3
:caption: API Reference
api/reference
.. include:: entities.inc
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/entities.inc | .. entities:: python
:global:
class
class
classmethod
class method
Tokenizer
:class:`~tokenizers.Tokenizer`
Tokenizer.train
:meth:`~tokenizers.Tokenizer.train`
Tokenizer.save
:meth:`~tokenizers.Tokenizer.save`
Tokenizer.from_file
:meth:`~tokenizers.Tokenizer.from_file`
Tokenizer.encode
:meth:`~tokenizers.Tokenizer.encode`
Tokenizer.encode_batch
:meth:`~tokenizers.Tokenizer.encode_batch`
Tokenizer.decode
:meth:`~tokenizers.Tokenizer.decode`
Tokenizer.decode_batch
:meth:`~tokenizers.Tokenizer.decode_batch`
Tokenizer.token_to_id
:meth:`~tokenizers.Tokenizer.token_to_id`
Tokenizer.enable_padding
:meth:`~tokenizers.Tokenizer.enable_padding`
Encoding
:class:`~tokenizers.Encoding`
TemplateProcessing
:class:`~tokenizers.processors.TemplateProcessing`
Normalizer
:class:`~tokenizers.normalizers.Normalizer`
normalizers.Sequence
:class:`~tokenizers.normalizers.Sequence`
pre_tokenizers.Whitespace
:class:`~tokenizers.pre_tokenizers.Whitespace`
PreTokenizer
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
models.BPE
:class:`~tokenizers.models.BPE`
models.Unigram
:class:`~tokenizers.models.Unigram`
models.WordLevel
:class:`~tokenizers.models.WordLevel`
models.WordPiece
:class:`~tokenizers.models.WordPiece`
Decoder
:class:`~tokenizers.decoders.Decoder`
.. entities:: rust
:global:
class
struct
classmethod
static method
Tokenizer
:rust_struct:`~tokenizers::tokenizer::Tokenizer`
Tokenizer.train
:rust_meth:`~tokenizers::tokenizer::Tokenizer::train`
Tokenizer.save
:rust_meth:`~tokenizers::tokenizer::Tokenizer::save`
Tokenizer.from_file
:rust_meth:`~tokenizers::tokenizer::Tokenizer::from_file`
Tokenizer.encode
:rust_meth:`~tokenizers::tokenizer::Tokenizer::encode`
Tokenizer.encode_batch
:rust_meth:`~tokenizers::tokenizer::Tokenizer::encode_batch`
Tokenizer.decode
:rust_meth:`~tokenizers::tokenizer::Tokenizer::decode`
Tokenizer.decode_batch
:rust_meth:`~tokenizers::tokenizer::Tokenizer::decode_batch`
Tokenizer.token_to_id
:rust_meth:`~tokenizers::tokenizer::Tokenizer::token_to_id`
Tokenizer.enable_padding
:rust_meth:`~tokenizers::tokenizer::Tokenizer::enable_padding`
Encoding
:rust_struct:`~tokenizers::tokenizer::Encoding`
TemplateProcessing
:rust_struct:`~tokenizers::processors::template::TemplateProcessing`
Normalizer
:rust_trait:`~tokenizers::tokenizer::Normalizer`
normalizers.Sequence
:rust_struct:`~tokenizers::normalizers::utils::Sequence`
pre_tokenizers.Whitespace
:rust_struct:`~tokenizers::pre_tokenizers::whitespace::Whitespace`
PreTokenizer
:rust_trait:`~tokenizers::tokenizer::PreTokenizer`
models.BPE
:rust_struct:`~tokenizers::models::bpe::BPE`
models.Unigram
:rust_struct:`~tokenizers::models::unigram::Unigram`
models.WordLevel
:rust_struct:`~tokenizers::models::wordlevel::WordLevel`
models.WordPiece
:rust_struct:`~tokenizers::models::wordpiece::WordPiece`
Decoder
:rust_trait:`~tokenizers::tokenizer::Decoder`
.. entities:: node
:global:
class
class
classmethod
static method
Tokenizer
:obj:`Tokenizer`
Tokenizer.train
:obj:`Tokenizer.train()`
Tokenizer.save
:obj:`Tokenizer.save()`
Tokenizer.from_file
:obj:`Tokenizer.fromFile()`
Tokenizer.encode
:obj:`Tokenizer.encode()`
Tokenizer.encode_batch
:obj:`Tokenizer.encodeBatch()`
Tokenizer.decode
:obj:`Tokenizer.decode()`
Tokenizer.decode_batch
:obj:`Tokenizer.decodeBatch()`
Tokenizer.token_to_id
:obj:`Tokenizer.tokenToId()`
Tokenizer.enable_padding
:obj:`Tokenizer.setPadding()`
Encoding
:obj:`Encoding`
TemplateProcessing
:obj:`TemplateProcessing`
Normalizer
:obj:`Normalizer`
normalizers.Sequence
:obj:`Sequence`
pre_tokenizers.Whitespace
:obj:`Whitespace`
PreTokenizer
:obj:`PreTokenizer`
models.BPE
:obj:`BPE`
models.Unigram
:obj:`Unigram`
models.WordLevel
:obj:`WordLevel`
models.WordPiece
:obj:`WordPiece`
Decoder
:obj:`Decoder`
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/toctree_tags.py | import re
from sphinx.directives.other import TocTree
class TocTreeTags(TocTree):
    hasPat = re.compile(r"^\s*:(.+):(.+)$")
def filter_entries(self, entries):
filtered = []
for e in entries:
m = self.hasPat.match(e)
            if m is not None:
if self.env.app.tags.has(m.groups()[0]):
filtered.append(m.groups()[1])
else:
filtered.append(e)
return filtered
def run(self):
self.content = self.filter_entries(self.content)
return super().run()
def setup(app):
app.add_directive("toctree-tags", TocTreeTags)
return {
"version": "0.1",
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/entities.py | from collections import defaultdict, abc
from typing import cast
from docutils import nodes
from docutils.parsers.rst import Directive
import sphinx
from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective
from sphinx.errors import ExtensionError, NoUri
from conf import languages as LANGUAGES
logger = sphinx.util.logging.getLogger(__name__)
GLOBALNAME = "$GLOBAL$"
def update(d, u):
for k, v in u.items():
if isinstance(v, abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
class EntityNode(nodes.General, nodes.Element):
pass
class EntitiesNode(nodes.General, nodes.Element):
pass
class AllEntities:
def __init__(self):
self.entities = defaultdict(dict)
@classmethod
def install(cls, env):
if not hasattr(env, "entity_all_entities"):
entities = cls()
env.entity_all_entities = entities
return env.entity_all_entities
def merge(self, other):
self.entities.update(other.entities)
def purge(self, docname):
for env_docname in [GLOBALNAME, docname]:
self.entities[env_docname] = dict(
[
(name, entity)
for name, entity in self.entities[env_docname].items()
if entity["docname"] != docname
]
)
def _add_entities(self, entities, language, is_global, docname):
scope = GLOBALNAME if is_global else docname
for entity in entities:
name = f'{language}-{entity["name"]}'
content = entity["content"]
if name in self.entities[scope]:
logger.warning(
f'Entity "{name}" has already been defined{" globally" if is_global else ""}',
location=docname,
)
self.entities[scope][name] = {"docname": docname, "content": content}
def _extract_global(self, nodes):
for node in nodes:
if node.tagname != "field":
raise Exception(f"Expected a field, found {node.tagname}")
name, _ = node.children
if name.tagname != "field_name":
raise Exception(f"Expected a field name here, found {name_node.tagname}")
if str(name.children[0]) == "global":
return True
def _extract_entities(self, nodes):
entities = []
for node in nodes:
if node.tagname != "definition_list_item":
raise Exception(f"Expected a list item here, found {node.tagname}")
name_node, content_node = node.children
if name_node.tagname != "term":
raise Exception(f"Expected a term here, found {name_node.tagname}")
if content_node.tagname != "definition":
raise Exception(f"Expected a definition here, found {content_node.tagname}")
name = str(name_node.children[0])
if len(content_node.children) == 1 and content_node.children[0].tagname == "paragraph":
content = content_node.children[0].children[0]
else:
content = content_node
entities.append({"name": name, "content": content})
return entities
def extract(self, node, docname):
is_global = False
entities = []
language = None
for node in node.children:
if language is None and node.tagname != "paragraph":
raise Exception(f"Expected language name:\n.. entities:: <LANGUAGE>")
elif language is None and node.tagname == "paragraph":
language = str(node.children[0])
if language not in LANGUAGES:
raise Exception(
                        f'Unknown language "{language}". Might be missing a newline after language'
)
elif node.tagname == "field_list":
is_global = self._extract_global(node.children)
elif node.tagname == "definition_list":
entities.extend(self._extract_entities(node.children))
else:
raise Exception(f"Expected a list of terms/options, found {node.tagname}")
self._add_entities(entities, language, is_global, docname)
def resolve_pendings(self, app):
env = app.builder.env
updates = defaultdict(dict)
for env_docname in self.entities.keys():
for name, entity in self.entities[env_docname].items():
docname = entity["docname"]
node = entity["content"]
for node in node.traverse(sphinx.addnodes.pending_xref):
contnode = cast(nodes.TextElement, node[0].deepcopy())
newnode = None
typ = node["reftype"]
target = node["reftarget"]
refdoc = node.get("refdoc", docname)
domain = None
try:
if "refdomain" in node and node["refdomain"]:
# let the domain try to resolve the reference
try:
domain = env.domains[node["refdomain"]]
except KeyError as exc:
raise NoUri(target, typ) from exc
newnode = domain.resolve_xref(
env, refdoc, app.builder, typ, target, node, contnode
)
except NoUri:
newnode = contnode
updates[env_docname][name] = {
"docname": docname,
"content": newnode or contnode,
}
update(self.entities, updates)
def get(self, language, name, docname):
name = f"{language}-{name}"
if name in self.entities[docname]:
return self.entities[docname][name]
elif name in self.entities[GLOBALNAME]:
return self.entities[GLOBALNAME][name]
else:
return None
class EntitiesDirective(SphinxDirective):
has_content = True
def run(self):
content = nodes.definition_list()
self.state.nested_parse(self.content, self.content_offset, content)
try:
entities = AllEntities.install(self.env)
entities.extract(content, self.env.docname)
except Exception as err:
raise self.error(f'Malformed directive "entities": {err}')
return []
def entity_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
node = EntityNode()
node.entity = text
return [node], []
def process_entity_nodes(app, doctree, docname):
""" Replace all the entities by their content """
env = app.builder.env
entities = AllEntities.install(env)
entities.resolve_pendings(app)
language = None
try:
language = next(l for l in LANGUAGES if l in app.tags)
except Exception:
logger.warning(f"No language tag specified, not resolving entities in {docname}")
for node in doctree.traverse(EntityNode):
if language is None:
node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
else:
entity = entities.get(language, node.entity, docname)
if entity is None:
node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
logger.warning(f'Entity "{node.entity}" has not been defined', location=node)
else:
node.replace_self(entity["content"])
def purge_entities(app, env, docname):
""" Purge any entity that comes from the given docname """
entities = AllEntities.install(env)
entities.purge(docname)
def merge_entities(app, env, docnames, other):
""" Merge multiple environment entities """
entities = AllEntities.install(env)
other_entities = AllEntities.install(other)
entities.merge(other_entities)
def setup(app):
app.add_node(EntityNode)
app.add_node(EntitiesNode)
app.add_directive("entities", EntitiesDirective)
app.add_role("entity", entity_role)
app.connect("doctree-resolved", process_entity_nodes)
app.connect("env-merge-info", merge_entities)
app.connect("env-purge-doc", purge_entities)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/rust_doc.py | from docutils import nodes
import sphinx
from sphinx.locale import _
from conf import rust_version
logger = sphinx.util.logging.getLogger(__name__)
class RustRef:
def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]):
doctype = name.split("_")[1]
parts = text.split("::")
if text.startswith("~"):
title = parts[-1]
parts[0] = parts[0][1:]
else:
            title = text
link = self.base_link()
if doctype == "struct":
l, title = self.make_struct_link(parts, title)
if doctype == "func":
l, title = self.make_func_link(parts, title)
if doctype == "meth":
l, title = self.make_meth_link(parts, title)
if doctype == "trait":
l, title = self.make_trait_link(parts, title)
link += l
node = nodes.reference(internal=False, refuri=link, text=title)
wrapper = nodes.literal(classes=["xref"])
wrapper += node
return [wrapper], []
def base_link(self):
return f"https://docs.rs/tokenizers/{rust_version}"
def make_struct_link(self, parts, title):
link = ""
struct_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/struct.{struct_name}.html"
return link, title
def make_func_link(self, parts, title):
link = ""
fn_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/fn.{fn_name}.html"
return link, title
def make_meth_link(self, parts, title):
meth_name = parts[-1]
if meth_name.endswith("()"):
meth_name = meth_name[:-2]
link, title = self.make_struct_link(parts[:-1], title)
link += f"#method.{meth_name}"
if not title.endswith(")"):
title += "()"
return link, title
def make_trait_link(self, parts, title):
link = ""
trait_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/trait.{trait_name}.html"
return link, title
def setup(app):
app.add_role("rust_struct", RustRef())
app.add_role("rust_func", RustRef())
app.add_role("rust_meth", RustRef())
app.add_role("rust_trait", RustRef())
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/reference.rst | .. only:: python
.. include:: python.inc
.. only:: rust
.. include:: rust.inc
.. only:: node
.. include:: node.inc
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/node.inc | Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The node API has not been documented yet.
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/python.inc | Input sequences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
Globally, any sequence can be either a string or a list of strings, according to the operating
mode of the tokenizer: ``raw text`` vs ``pre-tokenized``.
.. autodata:: tokenizers.TextInputSequence
.. autodata:: tokenizers.PreTokenizedInputSequence
.. autodata:: tokenizers.InputSequence
Encode inputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of input that a :class:`~tokenizers.Tokenizer` accepts
when using :meth:`~tokenizers.Tokenizer.encode_batch`.
.. autodata:: tokenizers.TextEncodeInput
.. autodata:: tokenizers.PreTokenizedEncodeInput
.. autodata:: tokenizers.EncodeInput
Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Tokenizer
:members:
Encoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Encoding
:members:
Added Tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.AddedToken
:members:
Models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.models
:members:
Normalizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.normalizers
:members:
Pre-tokenizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.pre_tokenizers
:members:
Post-processor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.processors
:members:
Trainers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.trainers
:members:
Decoders
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.decoders
:members:
Visualizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.tools.Annotation
:members:
.. autoclass:: tokenizers.tools.EncodingVisualizer
:members: __call__
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/rust.inc | Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Rust API Reference is available directly on the `Docs.rs <https://docs.rs/tokenizers>`__
website.
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/main.rst | Installation
====================================================================================================
.. only:: python
.. include:: python.inc
.. only:: rust
.. include:: rust.inc
.. only:: node
.. include:: node.inc
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/node.inc | Installation with npm
----------------------------------------------------------------------------------------------------
You can simply install 🤗 Tokenizers with npm using::
npm install tokenizers
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/python.inc | 🤗 Tokenizers is tested on Python 3.5+.
You should install 🤗 Tokenizers in a
`virtual environment <https://docs.python.org/3/library/venv.html>`_. If you're unfamiliar with
Python virtual environments, check out the
`user guide <https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/>`__.
Create a virtual environment with the version of Python you're going to use and activate it.
Installation with pip
----------------------------------------------------------------------------------------------------
🤗 Tokenizers can be installed using pip as follows::
pip install tokenizers
Installation from sources
----------------------------------------------------------------------------------------------------
To use this method, you need to have the Rust language installed. You can follow
`the official guide <https://www.rust-lang.org/learn/get-started>`__ for more information.
If you are using a unix-based OS, the installation should be as simple as running::
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
Or you can easily update it with the following command::
rustup update
Once rust is installed, we can start retrieving the sources for 🤗 Tokenizers::
git clone https://github.com/huggingface/tokenizers
Then we go into the python bindings folder::
cd tokenizers/bindings/python
At this point you should have your `virtual environment`_ already activated. In order to
compile 🤗 Tokenizers, you need to install the Python package :obj:`setuptools_rust`::
pip install setuptools_rust
Then you can have 🤗 Tokenizers compiled and installed in your virtual environment with
the following command::
python setup.py install
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/rust.inc | Crates.io
----------------------------------------------------------------------------------------------------
🤗 Tokenizers is available on `crates.io <https://crates.io/crates/tokenizers>`__.
You just need to add it to your :obj:`Cargo.toml`::
tokenizers = "0.10"
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/js/custom.js | // These three variables below need to be updated at each release for the selectors.
const languages = [ "rust", "python", "node" ];
// Last stable version for each language
const stableVersion = {
"rust": "master",
"python": "v0.10.0",
"node": "master"
}
// Dictionary mapping doc folder to label for each language
const versionMapping = {
"rust": {
"master": "master",
},
"python": {
"master": "master",
"v0.9.4": "v0.9.4",
"v0.10.0": "v0.10.0",
},
"node": {
"master": "master",
}
};
// Dictionary mapping language name to label
const languageName = {
"rust": "Rust",
"python": "Python",
"node": "Node.js"
};
const defaultLanguage = "python";
function addIcon() {
const huggingFaceLogo =
"https://huggingface.co/landing/assets/transformers-docs/huggingface_logo.svg";
const image = document.createElement("img");
image.setAttribute("src", huggingFaceLogo);
const div = document.createElement("div");
div.appendChild(image);
div.style.textAlign = 'center';
div.style.paddingTop = '30px';
div.style.backgroundColor = '#6670FF';
const scrollDiv = document.querySelector(".wy-side-scroll");
scrollDiv.prepend(div);
}
function addCustomFooter() {
const customFooter = document.createElement("div");
const questionOrIssue = document.createElement("div");
questionOrIssue.innerHTML =
"Stuck? Read our <a href='https://medium.com/huggingface'>Blog posts</a>" +
" or <a href='https://github.com/huggingface/tokenizers'>Create an issue</a>";
customFooter.appendChild(questionOrIssue);
customFooter.classList.add("footer");
const social = document.createElement("div");
social.classList.add("footer__Social");
const imageDetails = [ {
link: "https://huggingface.co",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/website.svg"
}, {
link: "https://twitter.com/huggingface",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/twitter.svg"
}, {
link: "https://github.com/huggingface",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/github.svg"
}, {
link: "https://www.linkedin.com/company/huggingface/",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/linkedin.svg"
} ];
imageDetails.forEach(imageLinks => {
const link = document.createElement("a");
const image = document.createElement("img");
image.src = imageLinks.imageLink;
link.href = imageLinks.link;
image.style.width = "30px";
image.classList.add("footer__CustomImage");
link.appendChild(image);
social.appendChild(link);
});
customFooter.appendChild(social);
document.querySelector("footer").appendChild(customFooter);
}
function addGithubButton() {
const div = `
<div class="github-repo">
<a class="github-button"
href="https://github.com/huggingface/tokenizers"
data-size="large"
data-show-count="true"
aria-label="Star huggingface/tokenizers on GitHub">
Star
</a>
</div>
`;
document.querySelector(".wy-side-nav-search .icon-home").insertAdjacentHTML('afterend', div);
}
function addVersionControl() {
// Default language and version
let language = defaultLanguage;
// To grab the version currently in view, we parse the url
let parts = location.pathname.split('/');
const languageIndex = parts.findIndex((part) => languages.includes(part));
language = parts[languageIndex];
let version = stableVersion[language];
const versionIndex = languageIndex + 1;
// If a version is specified, update it
if (parts[versionIndex] != "" && !parts[versionIndex].endsWith(".html")) {
// If `latest`, let's keep the default (should be the explicit latest version)
if (parts[versionIndex] != "latest") {
version = parts[versionIndex];
}
// Otherwise redirect to the latest (if not opening locally)
} else if (!parts[parts.length - 1].endsWith(".html")) {
return window.location.pathname = [language, version, parts.splice(versionIndex)].join("/");
// Opening locally, just don't show the version/language selector
} else {
return
}
// Language Menu
const languageMenu = document.createElement("div");
languageMenu.classList.add("menu-dropdown");
languageMenu.innerHTML = languages.map((lang) => {
let isVersion = false;
let updatedParts = parts.map((l, i) => {
if (isVersion) {
isVersion = false;
return 'latest';
}
if (i == languageIndex) {
isVersion = true;
return lang;
} else {
return l;
}
});
return `
<a class="dropdown-link ${lang == language? 'active' : ''}"
href=${updatedParts.join("/")}>
${languageName[lang]}
</a>
`;
}).join("\n");
// Version Menu
const versionMenu = document.createElement("div");
versionMenu.classList.add("menu-dropdown");
versionMenu.innerHTML = Object.entries(versionMapping[language]).map(([key, value]) => {
let updatedParts = parts.map((v, i) => {
if (i == versionIndex) {
return key;
} else {
return v
}
});
return `
<a class="dropdown-link ${key == version ? 'active' : ''}"
href="${updatedParts.join('/')}">
${value}
</a>
`;
}).join("\n");
// Language button
const languageButton = document.createElement("div");
languageButton.classList.add("dropdown-button");
languageButton.innerText = languageName[language].concat(" ▼");
languageButton.addEventListener("click", () => {
versionMenu.classList.remove("show");
languageMenu.classList.toggle("show");
languageButton.classList.toggle("active");
});
// Button for version selection
const versionButton = document.createElement("div");
versionButton.classList.add("dropdown-button");
versionButton.innerText = version.concat(" ▼");
// Toggle the menu when we click on the button
versionButton.addEventListener("click", () => {
languageMenu.classList.remove("show");
versionMenu.classList.toggle("show");
versionButton.classList.toggle("active");
});
// Hide the menu when we click elsewhere
window.addEventListener("click", (event) => {
if (event.target != languageButton){
languageButton.classList.remove('active');
languageMenu.classList.remove('show');
}
if (event.target != versionButton){
versionButton.classList.remove('active');
versionMenu.classList.remove('show');
}
});
const buttonContainer = document.createElement("div");
buttonContainer.classList.add("button-container");
buttonContainer.appendChild(languageButton);
buttonContainer.appendChild(versionButton);
// Container
const div = document.createElement("div");
div.classList.add("selectors");
div.appendChild(buttonContainer);
div.appendChild(languageMenu);
div.appendChild(versionMenu);
div.style.paddingTop = '25px';
div.style.backgroundColor = '#6670FF';
div.style.display = 'block';
div.style.textAlign = 'center';
const scrollDiv = document.querySelector(".wy-side-scroll");
scrollDiv.insertBefore(div, scrollDiv.children[1]);
}
function addHfMenu() {
const div = `
<div class="hf-menu">
<a href="/welcome">🔥 Sign in</a>
<a href="/models">🚀 Models</a>
<a href="http://discuss.huggingface.co">💬 Forum</a>
</div>
`;
document.body.insertAdjacentHTML('afterbegin', div);
}
/*!
* github-buttons v2.2.10
* (c) 2019 なつき
* @license BSD-2-Clause
*/
/**
* modified to run programmatically
*/
function parseGithubButtons (){"use strict";var e=window.document,t=e.location,o=window.encodeURIComponent,r=window.decodeURIComponent,n=window.Math,a=window.HTMLElement,i=window.XMLHttpRequest,l="https://unpkg.com/[email protected]/dist/buttons.html",c=i&&i.prototype&&"withCredentials"in i.prototype,d=c&&a&&a.prototype.attachShadow&&!a.prototype.attachShadow.prototype,s=function(e,t,o){e.addEventListener?e.addEventListener(t,o):e.attachEvent("on"+t,o)},u=function(e,t,o){e.removeEventListener?e.removeEventListener(t,o):e.detachEvent("on"+t,o)},h=function(e,t,o){var r=function(n){return u(e,t,r),o(n)};s(e,t,r)},f=function(e,t,o){var r=function(n){if(t.test(e.readyState))return u(e,"readystatechange",r),o(n)};s(e,"readystatechange",r)},p=function(e){return function(t,o,r){var n=e.createElement(t);if(o)for(var a in o){var i=o[a];null!=i&&(null!=n[a]?n[a]=i:n.setAttribute(a,i))}if(r)for(var l=0,c=r.length;l<c;l++){var d=r[l];n.appendChild("string"==typeof d?e.createTextNode(d):d)}return n}},g=p(e),b=function(e){var t;return function(){t||(t=1,e.apply(this,arguments))}},m="body{margin:0}a{color:#24292e;text-decoration:none;outline:0}.octicon{display:inline-block;vertical-align:text-top;fill:currentColor}.widget{ display:inline-block;overflow:hidden;font-family:-apple-system, BlinkMacSystemFont, \"Segoe UI\", Helvetica, Arial, sans-serif;font-size:0;white-space:nowrap;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn,.social-count{display:inline-block;height:14px;padding:2px 5px;font-size:11px;font-weight:600;line-height:14px;vertical-align:bottom;cursor:pointer;border:1px solid #c5c9cc;border-radius:0.25em}.btn{background-color:#eff3f6;background-image:-webkit-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:-moz-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:linear-gradient(180deg, #fafbfc, #eff3f6 90%);background-position:-1px -1px;background-repeat:repeat-x;background-size:110% 110%;border-color:rgba(27,31,35,0.2);-ms-filter:\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', endColorstr='#FFEEF2F5')\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', endColorstr='#FFEEF2F5')}.btn:active{background-color:#e9ecef;background-image:none;border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);box-shadow:inset 0 0.15em 0.3em rgba(27,31,35,0.15)}.btn:focus,.btn:hover{background-color:#e6ebf1;background-image:-webkit-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:-moz-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:linear-gradient(180deg, #f0f3f6, #e6ebf1 90%);border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);-ms-filter:\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')}.social-count{position:relative;margin-left:5px;background-color:#fff}.social-count:focus,.social-count:hover{color:#0366d6}.social-count b,.social-count i{position:absolute;top:50%;left:0;display:block;width:0;height:0;margin:-4px 0 0 -4px;border:solid transparent;border-width:4px 4px 4px 0;_line-height:0;_border-top-color:red !important;_border-bottom-color:red !important;_border-left-color:red !important;_filter:chroma(color=red)}.social-count b{border-right-color:#c5c9cc}.social-count i{margin-left:-3px;border-right-color:#fff}.lg .btn,.lg .social-count{height:16px;padding:5px 10px;font-size:12px;line-height:16px}.lg 
.social-count{margin-left:6px}.lg .social-count b,.lg .social-count i{margin:-5px 0 0 -5px;border-width:5px 5px 5px 0}.lg .social-count i{margin-left:-4px}\n",v={"mark-github":{width:16,height:16,path:'<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/>'},eye:{width:16,height:16,path:'<path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/>'},star:{width:14,height:16,path:'<path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74L14 6z"/>'},"repo-forked":{width:10,height:16,path:'<path fill-rule="evenodd" d="M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/>'},"issue-opened":{width:14,height:16,path:'<path fill-rule="evenodd" d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"/>'},"cloud-download":{width:16,height:16,path:'<path fill-rule="evenodd" d="M9 12h2l-3 3-3-3h2V7h2v5zm3-8c0-.44-.91-3-4.5-3C5.08 1 3 2.92 3 5 1.02 5 0 6.52 0 8c0 1.53 1 3 3 3h3V9.7H3C1.38 9.7 1.3 8.28 1.3 8c0-.17.05-1.7 1.7-1.7h1.3V5c0-1.39 1.56-2.7 3.2-2.7 2.55 0 3.13 1.55 3.2 1.8v1.2H12c.81 0 2.7.22 2.7 2.2 0 2.09-2.25 2.2-2.7 2.2h-2V11h2c2.08 0 4-1.16 4-3.5C16 5.06 14.08 4 12 4z"/>'}},w={},x=function(e,t,o){var r=p(e.ownerDocument),n=e.appendChild(r("style",{type:"text/css"}));n.styleSheet?n.styleSheet.cssText=m:n.appendChild(e.ownerDocument.createTextNode(m));var a,l,d=r("a",{className:"btn",href:t.href,target:"_blank",innerHTML:(a=t["data-icon"],l=/^large$/i.test(t["data-size"])?16:14,a=(""+a).toLowerCase().replace(/^octicon-/,""),{}.hasOwnProperty.call(v,a)||(a="mark-github"),'<svg version="1.1" width="'+l*v[a].width/v[a].height+'" height="'+l+'" viewBox="0 0 '+v[a].width+" "+v[a].height+'" class="octicon octicon-'+a+'" aria-hidden="true">'+v[a].path+"</svg>"),"aria-label":t["aria-label"]||void 0},[" ",r("span",{},[t["data-text"]||""])]);/\.github\.com$/.test("."+d.hostname)?/^https?:\/\/((gist\.)?github\.com\/[^\/?#]+\/[^\/?#]+\/archive\/|github\.com\/[^\/?#]+\/[^\/?#]+\/releases\/download\/|codeload\.github\.com\/)/.test(d.href)&&(d.target="_top"):(d.href="#",d.target="_self");var u,h,g,x,y=e.appendChild(r("div",{className:"widget"+(/^large$/i.test(t["data-size"])?" 
lg":"")},[d]));/^(true|1)$/i.test(t["data-show-count"])&&"github.com"===d.hostname&&(u=d.pathname.replace(/^(?!\/)/,"/").match(/^\/([^\/?#]+)(?:\/([^\/?#]+)(?:\/(?:(subscription)|(fork)|(issues)|([^\/?#]+)))?)?(?:[\/?#]|$)/))&&!u[6]?(u[2]?(h="/repos/"+u[1]+"/"+u[2],u[3]?(x="subscribers_count",g="watchers"):u[4]?(x="forks_count",g="network"):u[5]?(x="open_issues_count",g="issues"):(x="stargazers_count",g="stargazers")):(h="/users/"+u[1],g=x="followers"),function(e,t){var o=w[e]||(w[e]=[]);if(!(o.push(t)>1)){var r=b(function(){for(delete w[e];t=o.shift();)t.apply(null,arguments)});if(c){var n=new i;s(n,"abort",r),s(n,"error",r),s(n,"load",function(){var e;try{e=JSON.parse(n.responseText)}catch(e){return void r(e)}r(200!==n.status,e)}),n.open("GET",e),n.send()}else{var a=this||window;a._=function(e){a._=null,r(200!==e.meta.status,e.data)};var l=p(a.document)("script",{async:!0,src:e+(/\?/.test(e)?"&":"?")+"callback=_"}),d=function(){a._&&a._({meta:{}})};s(l,"load",d),s(l,"error",d),l.readyState&&f(l,/de|m/,d),a.document.getElementsByTagName("head")[0].appendChild(l)}}}.call(this,"https://api.github.com"+h,function(e,t){if(!e){var n=t[x];y.appendChild(r("a",{className:"social-count",href:t.html_url+"/"+g,target:"_blank","aria-label":n+" "+x.replace(/_count$/,"").replace("_"," ").slice(0,n<2?-1:void 0)+" on GitHub"},[r("b"),r("i"),r("span",{},[(""+n).replace(/\B(?=(\d{3})+(?!\d))/g,",")])]))}o&&o(y)})):o&&o(y)},y=window.devicePixelRatio||1,C=function(e){return(y>1?n.ceil(n.round(e*y)/y*2)/2:n.ceil(e))||0},F=function(e,t){e.style.width=t[0]+"px",e.style.height=t[1]+"px"},k=function(t,r){if(null!=t&&null!=r)if(t.getAttribute&&(t=function(e){for(var t={href:e.href,title:e.title,"aria-label":e.getAttribute("aria-label")},o=["icon","text","size","show-count"],r=0,n=o.length;r<n;r++){var a="data-"+o[r];t[a]=e.getAttribute(a)}return null==t["data-text"]&&(t["data-text"]=e.textContent||e.innerText),t}(t)),d){var a=g("span",{title:t.title||void 0});x(a.attachShadow({mode:"closed"}),t,function(){r(a)})}else{var i=g("iframe",{src:"javascript:0",title:t.title||void 0,allowtransparency:!0,scrolling:"no",frameBorder:0});F(i,[0,0]),i.style.border="none";var c=function(){var a,d=i.contentWindow;try{a=d.document.body}catch(t){return void e.body.appendChild(i.parentNode.removeChild(i))}u(i,"load",c),x.call(d,a,t,function(e){var a=function(e){var t=e.offsetWidth,o=e.offsetHeight;if(e.getBoundingClientRect){var r=e.getBoundingClientRect();t=n.max(t,C(r.width)),o=n.max(o,C(r.height))}return[t,o]}(e);i.parentNode.removeChild(i),h(i,"load",function(){F(i,a)}),i.src=l+"#"+(i.name=function(e){var t=[];for(var r in e){var n=e[r];null!=n&&t.push(o(r)+"="+o(n))}return t.join("&")}(t)),r(i)})};s(i,"load",c),e.body.appendChild(i)}};t.protocol+"//"+t.host+t.pathname===l?x(e.body,function(e){for(var t={},o=e.split("&"),n=0,a=o.length;n<a;n++){var i=o[n];if(""!==i){var l=i.split("=");t[r(l[0])]=null!=l[1]?r(l.slice(1).join("=")):void 0}}return t}(window.name||t.hash.replace(/^#/,""))):function(t){if(/m/.test(e.readyState)||!/g/.test(e.readyState)&&!e.documentElement.doScroll)setTimeout(t);else if(e.addEventListener){var o=b(t);h(e,"DOMContentLoaded",o),h(window,"load",o)}else f(e,/m/,t)}(function(){for(var t=e.querySelectorAll?e.querySelectorAll("a.github-button"):function(){for(var t=[],o=e.getElementsByTagName("a"),r=0,n=o.length;r<n;r++)~(" "+o[r].className+" ").replace(/[ \t\n\f\r]+/g," ").indexOf(" github-button ")&&t.push(o[r]);return 
t}(),o=0,r=t.length;o<r;o++)!function(e){k(e,function(t){e.parentNode.replaceChild(t,e)})}(t[o])})};
function onLoad() {
addIcon();
addVersionControl();
addCustomFooter();
addGithubButton();
parseGithubButtons();
addHfMenu();
}
window.addEventListener("load", onLoad);
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/css/code-snippets.css |
.highlight .c1, .highlight .sd{
color: #999
}
.highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc, .highlight .kt {
color: #FB8D68;
}
.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow, .highlight .kd, .highlight .kr, .highlight .s {
color: #6670FF;
}
.highlight .gp {
color: #FB8D68;
}
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/css/huggingface.css | /* Our DOM objects */
/* Version control */
.selectors {
margin-bottom: 10px;
}
.dropdown-button {
display: inline-block;
width: 50%;
background-color: #6670FF;
color: white;
border: none;
padding: 5px;
font-size: 15px;
cursor: pointer;
}
.dropdown-button:hover, .dropdown-button:focus, .dropdown-button.active {
background-color: #A6B0FF;
}
.dropdown-button.active {
background-color: #7988FF;
}
.menu-dropdown {
display: none;
background-color: #7988FF;
min-width: 160px;
overflow: auto;
font-size: 15px;
padding: 10px 0;
}
.menu-dropdown a {
color: white;
padding: 3px 4px;
text-decoration: none;
display: block;
}
.menu-dropdown a:hover {
background-color: #A6B0FF;
}
.dropdown-link.active {
background-color: #A6B0FF;
}
.show {
display: block;
}
/* The literal code blocks */
.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal {
color: #6670FF;
}
/* To keep the logo centered */
.wy-side-scroll {
width: auto;
font-size: 20px;
}
/* The div that holds the Hugging Face logo */
.HuggingFaceDiv {
width: 100%
}
/* The research field on top of the toc tree */
.wy-side-nav-search{
padding-top: 0;
background-color: #6670FF;
}
/* The toc tree */
.wy-nav-side{
background-color: #6670FF;
padding-bottom: 0;
}
/* The section headers in the toc tree */
.wy-menu-vertical p.caption{
background-color: #4d59ff;
line-height: 40px;
}
/* The selected items in the toc tree */
.wy-menu-vertical li.current{
background-color: #A6B0FF;
}
/* When a list item that does belong to the selected block from the toc tree is hovered */
.wy-menu-vertical li.current a:hover{
background-color: #B6C0FF;
}
/* When a list item that does NOT belong to the selected block from the toc tree is hovered. */
.wy-menu-vertical li a:hover{
background-color: #A7AFFB;
}
/* The text items on the toc tree */
.wy-menu-vertical a {
color: #FFFFDD;
font-family: Calibre-Light, sans-serif;
}
.wy-menu-vertical header, .wy-menu-vertical p.caption{
color: white;
font-family: Calibre-Light, sans-serif;
}
/* The color inside the selected toc tree block */
.wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a {
color: black;
}
/* Inside the depth-2 selected toc tree block */
.wy-menu-vertical li.toctree-l2.current>a {
background-color: #B6C0FF
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
background-color: #C6D0FF
}
/* Inside the depth-3 selected toc tree block */
.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{
background-color: #D6E0FF
}
/* Inside code snippets */
.rst-content dl:not(.docutils) dt{
font-size: 15px;
}
/* Links */
a {
color: #6670FF;
}
/* Content bars */
.rst-content dl:not(.docutils) dt {
background-color: rgba(251, 141, 104, 0.1);
border-right: solid 2px #FB8D68;
border-left: solid 2px #FB8D68;
color: #FB8D68;
font-family: Calibre-Light, sans-serif;
border-top: none;
font-style: normal !important;
}
/* Expand button */
.wy-menu-vertical li.toctree-l2 span.toctree-expand,
.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand,
.wy-menu-vertical li.toctree-l3 span.toctree-expand{
color: black;
}
/* Max window size */
.wy-nav-content{
max-width: 1200px;
}
/* Mobile header */
.wy-nav-top{
background-color: #6670FF;
}
/* Source spans */
.rst-content .viewcode-link, .rst-content .viewcode-back{
color: #6670FF;
font-size: 110%;
letter-spacing: 2px;
text-transform: uppercase;
}
/* It would be better for table to be visible without horizontal scrolling */
.wy-table-responsive table td, .wy-table-responsive table th{
white-space: normal;
}
.footer {
margin-top: 20px;
}
.footer__Social {
display: flex;
flex-direction: row;
}
.footer__CustomImage {
margin: 2px 5px 0 0;
}
/* class and method names in doc */
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{
font-family: Calibre, sans-serif;
font-size: 20px !important;
}
/* class name in doc*/
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{
margin-right: 10px;
font-family: Calibre-Medium, sans-serif;
}
/* Method and class parameters */
.sig-param{
line-height: 23px;
}
/* Class introduction "class" string at beginning */
.rst-content dl:not(.docutils) .property{
font-size: 18px;
color: black;
}
/* FONTS */
body{
font-family: Calibre, sans-serif;
font-size: 16px;
}
h1 {
font-family: Calibre-Thin, sans-serif;
font-size: 70px;
}
h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
font-family: Calibre-Medium, sans-serif;
}
@font-face {
font-family: Calibre-Medium;
src: url(./Calibre-Medium.otf);
font-weight:400;
}
@font-face {
font-family: Calibre;
src: url(./Calibre-Regular.otf);
font-weight:400;
}
@font-face {
font-family: Calibre-Light;
src: url(./Calibre-Light.ttf);
font-weight:400;
}
@font-face {
font-family: Calibre-Thin;
src: url(./Calibre-Thin.otf);
font-weight:400;
}
/**
* Nav Links to other parts of huggingface.co
*/
div.hf-menu {
position: absolute;
top: 0;
right: 0;
padding-top: 20px;
padding-right: 20px;
z-index: 1000;
}
div.hf-menu a {
font-size: 14px;
letter-spacing: 0.3px;
text-transform: uppercase;
color: white;
-webkit-font-smoothing: antialiased;
background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%);
padding: 10px 16px 6px 16px;
border-radius: 3px;
margin-left: 12px;
position: relative;
}
div.hf-menu a:active {
top: 1px;
}
@media (min-width: 768px) and (max-width: 1860px) {
.wy-breadcrumbs {
margin-top: 32px;
}
}
@media (max-width: 768px) {
div.hf-menu {
display: none;
}
}
| 0 |
hf_public_repos/tokenizers/docs/source/tutorials | hf_public_repos/tokenizers/docs/source/tutorials/python/training_from_memory.rst | Training from memory
----------------------------------------------------------------------------------------------------
In the `Quicktour <quicktour>`__, we saw how to build and train a tokenizer using text files,
but we can actually use any Python Iterator. In this section we'll see a few different ways of
training our tokenizer.
For all the examples listed below, we'll use the same :class:`~tokenizers.Tokenizer` and
:class:`~tokenizers.trainers.Trainer`, built as follows:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START init_tokenizer_trainer
:end-before: END init_tokenizer_trainer
:dedent: 8
This tokenizer is based on the :class:`~tokenizers.models.Unigram` model. It takes care of
normalizing the input using the NFKC Unicode normalization method, and uses a
:class:`~tokenizers.pre_tokenizers.ByteLevel` pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check `here <components>`__.
The most basic way
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As you probably guessed already, the easiest way to train our tokenizer is by using a :obj:`List`:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_basic
:end-before: END train_basic
:dedent: 8
Easy, right? You can use anything that works as an iterator here, be it a :obj:`List`, :obj:`Tuple`,
or a :obj:`np.ndarray`. Anything works as long as it provides strings.
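For illustration only, such a call might look like the following sketch, assuming the
:obj:`tokenizer` and :obj:`trainer` built above (the exact snippet lives in the referenced
test file)::

    data = ["A first sentence", "Another sentence", "And a last one"]
    tokenizer.train_from_iterator(data, trainer=trainer)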
Using the 🤗 Datasets library
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An awesome way to access one of the many datasets that exist out there is by using the 🤗 Datasets
library. For more information about it, you should check
`the official documentation here <https://huggingface.co/docs/datasets/>`__.
Let's start by loading our dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START load_dataset
:end-before: END load_dataset
:dedent: 8
The next step is to build an iterator over this dataset. The easiest way to do this is probably by
using a generator:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START def_batch_iterator
:end-before: END def_batch_iterator
:dedent: 8
As you can see here, for improved efficiency we can provide batches of examples to train on,
instead of iterating over them one by one. By doing so, we can expect performance very
similar to what we got while training directly from files.
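As a sketch, such a generator could look like this, assuming the dataset exposes a
:obj:`"text"` column (as wikitext does)::

    def batch_iterator(batch_size=1000):
        # Yield slices of the "text" column rather than single examples
        for i in range(0, len(dataset), batch_size):
            yield dataset[i : i + batch_size]["text"]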
With our iterator ready, we just need to launch the training. In order to improve the look of our
progress bars, we can specify the total length of the dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_datasets
:end-before: END train_datasets
:dedent: 8
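A minimal sketch of that call, assuming the :obj:`batch_iterator` above::

    tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))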
And that's it!
Using gzip files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since gzip files in Python can be used as iterators, it is extremely simple to train on such files:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START single_gzip
:end-before: END single_gzip
:dedent: 8
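For instance, a sketch of this, with a hypothetical file name::

    import gzip

    with gzip.open("data/my-file.0.gz", "rt") as f:
        tokenizer.train_from_iterator(f, trainer=trainer)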
Now if we wanted to train from multiple gzip files, it wouldn't be much harder:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START multi_gzip
:end-before: END multi_gzip
:dedent: 8
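One way to sketch this (the file names here are hypothetical) is to chain the files in a
single generator and pass it to the trainer::

    import gzip

    def gzip_iterator(paths):
        # Yield lines from each gzip file in turn
        for path in paths:
            with gzip.open(path, "rt") as f:
                yield from f

    files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"]
    tokenizer.train_from_iterator(gzip_iterator(files), trainer=trainer)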
And voilà!
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/installation.mdx | # Installation
<tokenizerslangcontent>
<python>
🤗 Tokenizers is tested on Python 3.5+.
You should install 🤗 Tokenizers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're
unfamiliar with Python virtual environments, check out the [user
guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
Create a virtual environment with the version of Python you're going to
use and activate it.
## Installation with pip
🤗 Tokenizers can be installed using pip as follows:
```bash
pip install tokenizers
```
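A quick, optional sanity check is to print the installed version:

```bash
python -c "import tokenizers; print(tokenizers.__version__)"
```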
## Installation from sources
To use this method, you need to have the Rust language installed. You
can follow [the official
guide](https://www.rust-lang.org/learn/get-started) for more
information.
If you are using a unix based OS, the installation should be as simple
as running:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
Or you can easily update it with the following command:
```bash
rustup update
```
Once Rust is installed, we can start retrieving the sources for 🤗
Tokenizers:
```bash
git clone https://github.com/huggingface/tokenizers
```
Then we go into the python bindings folder:
```bash
cd tokenizers/bindings/python
```
At this point you should have your [virtual environment](https://docs.python.org/3/library/venv.html) already
activated. In order to compile 🤗 Tokenizers, you need to install the
Python package `setuptools_rust`:
```bash
pip install setuptools_rust
```
Then you can have 🤗 Tokenizers compiled and installed in your virtual
environment with the following command:
```bash
python setup.py install
```
</python>
<rust>
## Crates.io
🤗 Tokenizers is available on [crates.io](https://crates.io/crates/tokenizers).
You just need to add it to your `Cargo.toml`:
```bash
tokenizers = "0.10"
```
</rust>
<node>
## Installation with npm
You can simply install 🤗 Tokenizers with npm using:
```bash
npm install tokenizers
```
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/index.mdx | <!-- DISABLE-FRONTMATTER-SECTIONS -->
# Tokenizers
Fast, state-of-the-art tokenizers, optimized for both research and
production
[🤗 Tokenizers](https://github.com/huggingface/tokenizers) provides an
implementation of today's most used tokenizers, with a focus on
performance and versatility. These tokenizers are also used in [🤗 Transformers](https://github.com/huggingface/transformers).
# Main features:
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get the part of the original sentence that corresponds to any token.
- Does all the pre-processing: truncation, padding, and adding the special tokens your model needs.
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/pipeline.mdx | # The tokenization pipeline
When calling `Tokenizer.encode` or
`Tokenizer.encode_batch`, the input
text(s) go through the following pipeline:
- `normalization`
- `pre-tokenization`
- `model`
- `post-processing`
We'll see in detail what happens during each of those steps,
as well as when you want to `decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you
to customize each of those steps to your needs. If you're already
familiar with those steps and want to learn by seeing some code, jump to
`our BERT from scratch example <example>`.
For the examples that require a `Tokenizer` we will use the tokenizer we trained in the
`quicktour`, which you can load with:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_reload_tokenizer",
"end-before": "END pipeline_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Normalization
Normalization is, in a nutshell, a set of operations you apply to a raw
string to make it less random or "cleaner". Common operations include
stripping whitespace, removing accented characters or lowercasing all
text. If you're familiar with [Unicode
normalization](https://unicode.org/reports/tr15), it is also a very
common normalization operation applied in most tokenizers.
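As a plain-Python illustration of what NFD normalization followed by accent stripping does (independent of the library itself):

```python
import unicodedata

text = "Héllò hôw are ü?"
# NFD splits accented characters into a base character plus combining marks
nfd = unicodedata.normalize("NFD", text)
# Dropping the combining marks removes the accents
print("".join(c for c in nfd if not unicodedata.combining(c)))  # "Hello how are u?"
```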
Each normalization operation is represented in the 🤗 Tokenizers library
by a `Normalizer`, and you can combine
several of those by using a `normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_normalizer",
"end-before": "END pipeline_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
You can manually test that normalizer by applying it to any string:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_normalizer",
"end-before": "END pipeline_test_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
When building a `Tokenizer`, you can
customize its normalizer by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_normalizer",
"end-before": "END pipeline_replace_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the way a tokenizer applies normalization, you
should probably retrain it from scratch afterward.
## Pre-Tokenization
Pre-tokenization is the act of splitting a text into smaller objects
that give an upper bound to what your tokens will be at the end of
training. A good way to think of this is that the pre-tokenizer will
split your text into "words", and then your final tokens will be parts
of those words.
An easy way to pre-tokenize inputs is to split on spaces and
punctuations, which is done by the
`pre_tokenizers.Whitespace`
pre-tokenizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_pre_tokenizer",
"end-before": "END pipeline_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The output is a list of tuples, with each tuple containing one word and
its span in the original sentence (which is used to determine the final
`offsets` of our `Encoding`). Note that splitting on
punctuation will split contractions like `"I'm"` in this example.
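A quick sketch of what that output looks like, using the `pre_tokenize_str` helper on its own:

```python
from tokenizers.pre_tokenizers import Whitespace

pre_tokenizer = Whitespace()
print(pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you."))
# [('Hello', (0, 5)), ('!', (5, 6)), ('How', (7, 10)), ('are', (11, 14)), ...]
```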
You can combine any `PreTokenizer`s together. For instance, here is a pre-tokenizer that will
split on spaces, punctuation and digits, separating numbers into their
individual digits:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_combine_pre_tokenizer",
"end-before": "END pipeline_combine_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
As we saw in the `quicktour`, you can
customize the pre-tokenizer of a `Tokenizer` by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_pre_tokenizer",
"end-before": "END pipeline_replace_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the way the pre-tokenizer works, you should probably
retrain your tokenizer from scratch afterward.
## Model
Once the input texts are normalized and pre-tokenized, the
`Tokenizer` applies the model on the
pre-tokens. This is the part of the pipeline that needs training on your
corpus (or that has been trained if you are using a pretrained
tokenizer).
The role of the model is to split your "words" into tokens, using the
rules it has learned. It's also responsible for mapping those tokens to
their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the
`Tokenizer` so you already know how to
customize this part. Currently, the 🤗 Tokenizers library supports:
- `models.BPE`
- `models.Unigram`
- `models.WordLevel`
- `models.WordPiece`
For more details about each model and its behavior, you can check
[here](components#models)
## Post-Processing
Post-processing is the last step of the tokenization pipeline, to
perform any additional transformation to the
`Encoding` before it's returned, like
adding potential special tokens.
As we saw in the quick tour, we can customize the post processor of a
`Tokenizer` by setting the
corresponding attribute. For instance, here is how we can post-process
to make the inputs suitable for the BERT model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_processor",
"end-before": "END pipeline_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Note that, contrary to the pre-tokenizer or the normalizer, you don't
need to retrain a tokenizer after changing its post-processor.
## All together: a BERT tokenizer from scratch
Let's put all those pieces together to build a BERT tokenizer. First,
BERT relies on WordPiece, so we instantiate a new
`Tokenizer` with this model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Then we know that BERT preprocesses texts by removing accents and
lowercasing. We also use a Unicode normalizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The pre-tokenizer is just splitting on whitespace and punctuation:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
And the post-processing uses the template we saw in the previous
section:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can use this tokenizer and train it on wikitext like in the
`quicktour`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Decoding
On top of encoding the input texts, a `Tokenizer` also has an API for decoding, that is, converting IDs
generated by your model back to text. This is done by the methods
`Tokenizer.decode` (for one predicted text) and `Tokenizer.decode_batch` (for a batch of predictions).
The `decoder` will first convert the IDs back to tokens
(using the tokenizer's vocabulary) and remove all special tokens, then
join those tokens with spaces:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_decoding",
"end-before": "END pipeline_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
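A minimal sketch of such a round trip, assuming the tokenizer trained in the quicktour:

```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(tokenizer.decode(output.ids))
```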
If you used a model that added special characters to represent subtokens
of a given "word" (like the `"##"` in
WordPiece), you will need to customize the `decoder` to treat
them properly. If we take our previous `bert_tokenizer`, for instance, the
default decoding will give:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
But by changing it to a proper decoder, we get:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
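For WordPiece, that proper decoder is `decoders.WordPiece`; a sketch of wiring it in:

```python
from tokenizers import decoders

bert_tokenizer.decoder = decoders.WordPiece()
```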
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/_toctree.yml | - sections:
- local: index
title: 🤗 Tokenizers
- local: quicktour
title: Quicktour
- local: installation
title: Installation
- local: pipeline
title: The tokenization pipeline
- local: components
title: Components
- local: training_from_memory
title: Training from memory
title: Getting started
- sections:
- local: api/input-sequences
title: Input Sequences
- local: api/encode-inputs
title: Encode Inputs
- local: api/tokenizer
title: Tokenizer
- local: api/encoding
title: Encoding
- local: api/added-tokens
title: Added Tokens
- local: api/models
title: Models
- local: api/normalizers
title: Normalizers
- local: api/pre-tokenizers
title: Pre-tokenizers
- local: api/post-processors
title: Post-processors
- local: api/trainers
title: Trainers
- local: api/decoders
title: Decoders
- local: api/visualizer
title: Visualizer
title: API
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/quicktour.mdx | # Quicktour
Let's have a quick look at the 🤗 Tokenizers library features. The
library provides an implementation of today's most used tokenizers that
is both easy to use and blazing fast.
## Build a tokenizer from scratch
To illustrate how fast the 🤗 Tokenizers library is, let's train a new
tokenizer on [wikitext-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
(516M of text) in just a few seconds. First things first, you will need
to download this dataset and unzip it with:
``` bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
```
### Training the tokenizer
In this tour, we will build and train a Byte-Pair Encoding (BPE)
tokenizer. For more information about the different type of tokenizers,
check out this [guide](https://huggingface.co/transformers/tokenizer_summary.html) in
the 🤗 Transformers documentation. Here, training the tokenizer means it
will learn merge rules by:
- Start with all the characters present in the training corpus as
tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached
the size we want (a toy sketch of this loop follows).
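To make these steps concrete, here is a toy, self-contained sketch of the merge loop. It is not the library's actual implementation, and the tiny corpus is made up:

```python
from collections import Counter

corpus = [list("hug"), list("pug"), list("hugs")]  # hypothetical tiny corpus
for _ in range(2):  # perform two merges
    pairs = Counter()
    for word in corpus:
        for a, b in zip(word, word[1:]):
            pairs[(a, b)] += 1  # count adjacent token pairs
    best = max(pairs, key=pairs.get)  # most frequent pair
    new_corpus = []
    for word in corpus:
        out, i = [], 0
        while i < len(word):
            if i + 1 < len(word) and (word[i], word[i + 1]) == best:
                out.append(word[i] + word[i + 1])  # merge the pair into one token
                i += 2
            else:
                out.append(word[i])
                i += 1
        new_corpus.append(out)
    corpus = new_corpus
print(corpus)  # [['hug'], ['p', 'ug'], ['hug', 's']]
```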
The main API of the library is the `Tokenizer` class; here is how
we instantiate one with a BPE model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_tokenizer",
"end-before": "END quicktour_init_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
To train our tokenizer on the wikitext files, we will need to
instantiate a trainer, in this case a
`BpeTrainer`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_trainer",
"end-before": "END quicktour_init_trainer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can set the training arguments like `vocab_size` or `min_frequency` (here
left at their default values of 30,000 and 0), but the most important
part is to give the `special_tokens` we
plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
<Tip>
The order in which you write the special tokens list matters: here `"[UNK]"` will get the ID 0,
`"[CLS]"` will get the ID 1 and so forth.
</Tip>
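For reference, a trainer configured with that ordered list might look like this sketch (mirroring the included snippet):

```python
from tokenizers.trainers import BpeTrainer

trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
```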
We could train our tokenizer right now, but it wouldn't be optimal.
Without a pre-tokenizer that will split our inputs into words, we might
get tokens that overlap several words: for instance we could get an
`"it is"` token since those two words
often appear next to each other. Using a pre-tokenizer will ensure no
token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest
pre-tokenizer possible by splitting on whitespace.
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_pretok",
"end-before": "END quicktour_init_pretok",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Now, we can just call the `Tokenizer.train` method with any list of files we want to use:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_train",
"end-before": "END quicktour_train",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
This should only take a few seconds to train our tokenizer on the full
wikitext dataset! To save the tokenizer in one file that contains all
its configuration and vocabulary, just use the
`Tokenizer.save` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_save",
"end-before": "END quicktour_save",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
and you can reload your tokenizer from that file with the
`Tokenizer.from_file`
`classmethod`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_reload_tokenizer",
"end-before": "END quicktour_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
### Using the tokenizer
Now that we have trained a tokenizer, we can use it on any text we want
with the `Tokenizer.encode` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode",
"end-before": "END quicktour_encode",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
This applies the full pipeline of the tokenizer to the text, returning
an `Encoding` object. To learn more
about this pipeline, and how to apply (or customize) parts of it, check out [this page](pipeline).
This `Encoding` object has all the
attributes you need for your deep learning model (or any other). The
`tokens` attribute contains the
segmentation of your text into tokens:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_tokens",
"end-before": "END quicktour_print_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Similarly, the `ids` attribute will
contain the index of each of those tokens in the tokenizer's
vocabulary:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_ids",
"end-before": "END quicktour_print_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
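As a sketch of both inspections in Python (the exact tokens and IDs depend on the vocabulary you trained):

```python
print(output.tokens)
# e.g. ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]

print(output.ids)
# e.g. [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
```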
An important feature of the 🤗 Tokenizers library is that it comes with
full alignment tracking, meaning you can always get the part of your
original sentence that corresponds to a given token. These mappings are stored in
the `offsets` attribute of our
`Encoding` object. For instance, let's
say we want to find out what caused the
`"[UNK]"` token to appear, which is the
token at index 9 in the list; we can just ask for the offset at that
index:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_offsets",
"end-before": "END quicktour_print_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
and those are the indices that correspond to the emoji in the original
sentence:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_use_offsets",
"end-before": "END quicktour_use_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
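Here is what those two steps look like in Python (a sketch; the exact offsets depend on your input):

```python
# The "[UNK]" token is at index 9 in our example encoding
print(output.offsets[9])
# e.g. (26, 27)

# Slicing the original sentence with those offsets recovers the emoji
sentence = "Hello, y'all! How are you 😁 ?"
print(sentence[26:27])
# "😁"
```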
### Post-processing
We might want our tokenizer to automatically add special tokens, like
`"[CLS]"` or `"[SEP]"`. To do this, we use a post-processor.
`TemplateProcessing` is the most
commonly used; you just have to specify a template for the processing of
single sentences and pairs of sentences, along with the special tokens
and their IDs.
When we built our tokenizer, we set `"[CLS]"` and `"[SEP]"` in positions 1
and 2 of our list of special tokens, so this should be their IDs. To
double-check, we can use the `Tokenizer.token_to_id` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_check_sep",
"end-before": "END quicktour_check_sep",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
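In Python this check is a one-liner (a sketch; the ID depends on the order of your special tokens at training time):

```python
print(tokenizer.token_to_id("[SEP]"))
# e.g. 2
```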
Here is how we can set the post-processing to give us the traditional
BERT inputs:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_template_processing",
"end-before": "END quicktour_init_template_processing",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
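For reference, the Python version of this configuration looks roughly like the following (a sketch; the IDs must match your own vocabulary):

```python
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],  # (token, id) pairs we just double-checked
)
```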
Let's go over this snippet of code in more detail. First we specify
the template for single sentences: those should have the form
`"[CLS] $A [SEP]"` where
`$A` represents our sentence.
Then, we specify the template for sentence pairs, which should have the
form `"[CLS] $A [SEP] $B [SEP]"` where
`$A` represents the first sentence and
`$B` the second one. The
`:1` added in the template represents the `type IDs` we want for each part of our input: it defaults
to 0 for everything (which is why we don't have
`$A:0`) and here we set it to 1 for the
tokens of the second sentence and the last `"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our
tokenizer's vocabulary.
To check that this worked properly, let's try to encode the same
sentence as before:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens",
"end-before": "END quicktour_print_special_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
To check the results on a pair of sentences, we just pass the two
sentences to `Tokenizer.encode`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens_pair",
"end-before": "END quicktour_print_special_tokens_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
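In Python (a sketch; the tokens shown are illustrative):

```python
output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")
print(output.tokens)
# e.g. ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
```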
You can then check that the type IDs attributed to each token are correct with
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_type_ids",
"end-before": "END quicktour_print_type_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
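A quick Python sketch, matching the pair encoded just above:

```python
print(output.type_ids)
# e.g. [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
```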
If you save your tokenizer with `Tokenizer.save`, the post-processor will be saved along with it.
### Encoding multiple sentences in a batch
To get the full speed of the 🤗 Tokenizers library, it's best to
process your texts in batches, using the
`Tokenizer.encode_batch` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch",
"end-before": "END quicktour_encode_batch",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
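In Python (a sketch; any list of strings works):

```python
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
```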
The output is then a list of `Encoding`
objects like the ones we saw before. You can process together as many
texts as you like, as long as they fit in memory.
To process a batch of sentence pairs, pass two lists to the
`Tokenizer.encode_batch` method: the
list of sentences A and the list of sentences B:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch_pair",
"end-before": "END quicktour_encode_batch_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
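A Python sketch with two hypothetical pairs:

```python
output = tokenizer.encode_batch(
    [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
)
```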
When encoding multiple sentences, you can automatically pad the outputs
to the longest sentence present by using
`Tokenizer.enable_padding`, with the
`pad_token` and its ID (we can
double-check the ID for the padding token with
`Tokenizer.token_to_id`, like before):
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_enable_padding",
"end-before": "END quicktour_enable_padding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
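In Python this looks roughly like the following (a sketch; we assume `"[PAD]"` got ID 3 at training time, which you can verify with `token_to_id`):

```python
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
```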
We can set the `direction` of the padding
(it defaults to the right) or a given `length` if we want to pad every sample to that specific length (here
we leave it unset, to pad to the size of the longest text).
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_batch_tokens",
"end-before": "END quicktour_print_batch_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
In this case, the `attention mask` generated by the
tokenizer takes the padding into account:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_attention_mask",
"end-before": "END quicktour_print_attention_mask",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
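A sketch of the last two checks in Python (token and mask values are illustrative):

```python
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print(output[1].tokens)
# e.g. ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]

print(output[1].attention_mask)
# e.g. [1, 1, 1, 1, 1, 1, 1, 0]
```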
## Pretrained
<tokenizerslangcontent>
<python>
### Using a pretrained tokenizer
You can load any tokenizer from the Hugging Face Hub as long as a
`tokenizer.json` file is available in the repository.
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
```
### Importing a pretrained tokenizer from legacy vocabulary files
You can also import a pretrained tokenizer directly, as long as you
have its vocabulary file. For instance, here is how to import the
classic pretrained BERT tokenizer:
```python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
```
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
```bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
```
</python>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/training_from_memory.mdx | # Training from memory
In the [Quicktour](quicktour), we saw how to build and train a
tokenizer using text files, but we can actually use any Python Iterator.
In this section we'll see a few different ways of training our
tokenizer.
For all the examples listed below, we'll use the same [`~tokenizers.Tokenizer`] and
[`~tokenizers.trainers.Trainer`], built as
follows:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START init_tokenizer_trainer",
"end-before": "END init_tokenizer_trainer",
"dedent": 8}
</literalinclude>
This tokenizer is based on the [`~tokenizers.models.Unigram`] model. It
takes care of normalizing the input using the NFKC Unicode normalization
method, and uses a [`~tokenizers.pre_tokenizers.ByteLevel`] pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check
[here](components).
## The most basic way
As you probably guessed already, the easiest way to train our tokenizer
is by using a `List`:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_basic",
"end-before": "END train_basic",
"dedent": 8}
</literalinclude>
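Concretely, this can be as simple as the following sketch (the training data here is made up):

```python
data = [
    "Beautiful is better than ugly.",
    "Explicit is better than implicit.",
    "Simple is better than complex.",
]
tokenizer.train_from_iterator(data, trainer=trainer)
```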
Easy, right? You can use anything working as an iterator here, be it a
`List`, a `Tuple`, or a `np.Array`. Anything
works as long as it provides strings.
## Using the 🤗 Datasets library
An awesome way to access one of the many datasets that exist out there
is by using the 🤗 Datasets library. For more information about it, you
should check [the official documentation
here](https://huggingface.co/docs/datasets/).
Let's start by loading our dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START load_dataset",
"end-before": "END load_dataset",
"dedent": 8}
</literalinclude>
The next step is to build an iterator over this dataset. The easiest way
to do this is probably by using a generator:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START def_batch_iterator",
"end-before": "END def_batch_iterator",
"dedent": 8}
</literalinclude>
As you can see here, for improved efficiency we can actually provide a
batch of examples used to train, instead of iterating over them one by
one. By doing so, we can expect performance very similar to what we
got while training directly from files.
With our iterator ready, we just need to launch the training. In order
to improve the look of our progress bars, we can specify the total
length of the dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_datasets",
"end-before": "END train_datasets",
"dedent": 8}
</literalinclude>
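Putting the three steps together, the whole flow looks roughly like this (a sketch assuming the wikitext dataset, which exposes a `"text"` column):

```python
import datasets

dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation")

def batch_iterator(batch_size=1000):
    # Yield batches of examples rather than single strings for better throughput
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]

# `length` is only used to display an accurate progress bar
tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))
```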
And that's it!
## Using gzip files
Since gzip files in Python can be used as iterators, it is extremely
simple to train on such files:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START single_gzip",
"end-before": "END single_gzip",
"dedent": 8}
</literalinclude>
Now if we wanted to train from multiple gzip files, it wouldn't be much
harder:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START multi_gzip",
"end-before": "END multi_gzip",
"dedent": 8}
</literalinclude>
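A sketch of both cases, with hypothetical file names:

```python
import gzip

# Single gzip file: the opened file is itself an iterator over lines
with gzip.open("data/my-file.0.gz", "rt") as f:
    tokenizer.train_from_iterator(f, trainer=trainer)

# Multiple gzip files: chain them in a generator
def gzip_iterator(paths):
    for path in paths:
        with gzip.open(path, "rt") as f:
            yield from f

files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"]
tokenizer.train_from_iterator(gzip_iterator(files), trainer=trainer)
```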
And voilà!
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/components.mdx | # Components
When building a Tokenizer, you can attach various types of components to
this Tokenizer in order to customize its behavior. This page lists most
provided components.
## Normalizers
A `Normalizer` is in charge of pre-processing the input string in order
to normalize it as relevant for a given use case. Some common examples
of normalization are the Unicode normalization algorithms (NFD, NFKD,
NFC & NFKC), lowercasing, etc. The specificity of `tokenizers` is that
we keep track of the alignment while normalizing. This is essential to
allow mapping from the generated tokens back to the input text.
The `Normalizer` is optional.
<tokenizerslangcontent>
<python>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>clean_text</li> <li>handle_chinese_chars</li> <li>strip_accents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence([NFKC(), Lowercase()])` |
</python>
<rust>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>clean_text</li> <li>handle_chinese_chars</li> <li>strip_accents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence::new(vec![NFKC, Lowercase])` |
</rust>
<node>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp and changes it with given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>cleanText</li> <li>handleChineseChars</li> <li>stripAccents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | |
</node>
</tokenizerslangcontent>
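For instance, composing several normalizers in Python looks roughly like this (a sketch; `normalize_str` is a convenient way to inspect a normalizer on its own):

```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents, Lowercase

# NFD decomposition first, so StripAccents can remove the combining marks
normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])
print(normalizer.normalize_str("Héllò hôw are ü?"))
# "hello how are u?"
```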
## Pre-tokenizers
The `PreTokenizer` takes care of splitting the input according to a set
of rules. This pre-processing lets you ensure that the underlying
`Model` does not build tokens across multiple "splits". For example if
you don't want to have whitespaces inside a token, then you can have a
`PreTokenizer` that splits on these whitespaces.
You can easily combine multiple `PreTokenizer` together using a
`Sequence` (see below). The `PreTokenizer` is also allowed to modify the
string, just like a `Normalizer` does. This is necessary to allow some
complicated algorithms that require to split before normalizing (e.g.
the ByteLevel)
<tokenizerslangcontent>
<python>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>removed</li><li>isolated</li><li>merged_with_previous</li><li>merged_with_next</li><li>contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence([Punctuation(), WhitespaceSplit()])` |
</python>
<rust>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>Removed</li><li>Isolated</li><li>MergedWithPrevious</li><li>MergedWithNext</li><li>Contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence::new(vec![Punctuation, WhitespaceSplit])` |
</rust>
<node>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>removed</li><li>isolated</li><li>mergedWithPrevious</li><li>mergedWithNext</li><li>contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | |
</node>
</tokenizerslangcontent>
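As an illustration, combining pre-tokenizers in Python looks roughly like this (a sketch; `pre_tokenize_str` shows the produced splits along with their offsets):

```python
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Whitespace, Digits

pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tokenizer.pre_tokenize_str("Call 911!"))
# [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]
```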
## Models
Models are the core algorithms used to actually tokenize, and therefore,
they are the only mandatory component of a Tokenizer.
| Name | Description |
| :--- | :--- |
| WordLevel | This is the “classic” tokenization algorithm. It lets you simply map words to IDs without anything fancy. This has the advantage of being really simple to use and understand, but it requires extremely large vocabularies for a good coverage. Using this `Model` requires the use of a `PreTokenizer`. No choice will be made by this model directly; it simply maps input tokens to IDs. |
| BPE | One of the most popular subword tokenization algorithms. Byte-Pair-Encoding works by starting with characters, while merging those that are the most frequently seen together, thus creating new tokens. It then works iteratively to build new tokens out of the most frequent pairs it sees in a corpus. BPE is able to build words it has never seen by using multiple subword tokens, and thus requires smaller vocabularies, with fewer chances of having “unk” (unknown) tokens. |
| WordPiece | This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in models like BERT. It uses a greedy algorithm that tries to build long words first, splitting into multiple tokens when entire words don’t exist in the vocabulary. This is different from BPE, which starts from characters, building bigger tokens when possible. It uses the famous `##` prefix to identify tokens that are part of a word (i.e. not starting a word). |
| Unigram | Unigram is also a subword tokenization algorithm, and works by trying to identify the best set of subword tokens to maximize the probability for a given sentence. This is different from BPE in that it is not deterministic based on a set of rules applied sequentially. Instead, Unigram will be able to compute multiple ways of tokenizing, while choosing the most probable one. |
## Post-Processors
After the whole pipeline, we sometimes want to insert some special
tokens before feeding a tokenized string into a model, like "[CLS] My
horse is amazing [SEP]". The `PostProcessor` is the component doing
just that.
| Name | Description | Example |
| :--- | :--- | :--- |
| TemplateProcessing | Lets you easily template the post processing, adding special tokens, and specifying the `type_id` for each sequence/special token. The template is given two strings representing the single sequence and the pair of sequences, as well as a set of special tokens to use. | Example, when specifying a template with these values:<br> <ul> <li> single: `"[CLS] $A [SEP]"` </li> <li> pair: `"[CLS] $A [SEP] $B [SEP]"` </li> <li> special tokens: <ul> <li>`"[CLS]"`</li> <li>`"[SEP]"`</li> </ul> </li> </ul> <br> Input: `("I like this", "but not this")` <br> Output: `"[CLS] I like this [SEP] but not this [SEP]"` |
## Decoders
The Decoder knows how to go from the IDs used by the Tokenizer, back to
a readable piece of text. Some `Normalizer` and `PreTokenizer` use
special characters or identifiers that need to be reverted for example.
| Name | Description |
| :--- | :--- |
| ByteLevel | Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. |
| Metaspace | Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier `▁` to identify whitespaces, and so this Decoder helps with decoding these. |
| WordPiece | Reverts the WordPiece Model. This model uses a special identifier `##` for continuing subwords, and so this Decoder helps with decoding these. |
| 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/post-processors.mdx | # Post-processors
<tokenizerslangcontent>
<python>
## BertProcessing
[[autodoc]] tokenizers.processors.BertProcessing
## ByteLevel
[[autodoc]] tokenizers.processors.ByteLevel
## RobertaProcessing
[[autodoc]] tokenizers.processors.RobertaProcessing
## TemplateProcessing
[[autodoc]] tokenizers.processors.TemplateProcessing
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/tokenizer.mdx | # Tokenizer
<tokenizerslangcontent>
<python>
## Tokenizer
[[autodoc]] tokenizers.Tokenizer
- all
- decoder
- model
- normalizer
- padding
- post_processor
- pre_tokenizer
- truncation
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/encoding.mdx | # Encoding
<tokenizerslangcontent>
<python>
## Encoding
[[autodoc]] tokenizers.Encoding
- all
- attention_mask
- ids
- n_sequences
- offsets
- overflowing
- sequence_ids
- special_tokens_mask
- tokens
- type_ids
- word_ids
- words
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/decoders.mdx | # Decoders
<tokenizerslangcontent>
<python>
## BPEDecoder
[[autodoc]] tokenizers.decoders.BPEDecoder
## ByteLevel
[[autodoc]] tokenizers.decoders.ByteLevel
## CTC
[[autodoc]] tokenizers.decoders.CTC
## Metaspace
[[autodoc]] tokenizers.decoders.Metaspace
## WordPiece
[[autodoc]] tokenizers.decoders.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/models.mdx | # Models
<tokenizerslangcontent>
<python>
## BPE
[[autodoc]] tokenizers.models.BPE
## Model
[[autodoc]] tokenizers.models.Model
## Unigram
[[autodoc]] tokenizers.models.Unigram
## WordLevel
[[autodoc]] tokenizers.models.WordLevel
## WordPiece
[[autodoc]] tokenizers.models.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/trainers.mdx | # Trainers
<tokenizerslangcontent>
<python>
## BpeTrainer
[[autodoc]] tokenizers.trainers.BpeTrainer
## UnigramTrainer
[[autodoc]] tokenizers.trainers.UnigramTrainer
## WordLevelTrainer
[[autodoc]] tokenizers.trainers.WordLevelTrainer
## WordPieceTrainer
[[autodoc]] tokenizers.trainers.WordPieceTrainer
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/visualizer.mdx | # Visualizer
<tokenizerslangcontent>
<python>
## Annotation
[[autodoc]] tokenizers.tools.Annotation
## EncodingVisualizer
[[autodoc]] tokenizers.tools.EncodingVisualizer
- __call__
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/input-sequences.mdx | # Input Sequences
<tokenizerslangcontent>
<python>
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
Globally, any sequence can be either a string or a list of strings, according to the operating
mode of the tokenizer: `raw text` vs `pre-tokenized`.
## TextInputSequence[[tokenizers.TextInputSequence]]
<code>tokenizers.TextInputSequence</code>
A `str` that represents an input sequence
## PreTokenizedInputSequence[[tokenizers.PreTokenizedInputSequence]]
<code>tokenizers.PreTokenizedInputSequence</code>
A pre-tokenized input sequence. Can be one of:
- A `List` of `str`
- A `Tuple` of `str`
alias of `Union[List[str], Tuple[str]]`.
## InputSequence[[tokenizers.InputSequence]]
<code>tokenizers.InputSequence</code>
Represents all the possible types of input sequences for encoding. Can be:
- When `is_pretokenized=False`: [TextInputSequence](#tokenizers.TextInputSequence)
- When `is_pretokenized=True`: [PreTokenizedInputSequence](#tokenizers.PreTokenizedInputSequence)
alias of `Union[str, List[str], Tuple[str]]`.
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/added-tokens.mdx | # Added Tokens
<tokenizerslangcontent>
<python>
## AddedToken
[[autodoc]] tokenizers.AddedToken
- content
- lstrip
- normalized
- rstrip
- single_word
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx | # Pre-tokenizers
<tokenizerslangcontent>
<python>
## BertPreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer
## ByteLevel
[[autodoc]] tokenizers.pre_tokenizers.ByteLevel
## CharDelimiterSplit
[[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit
## Digits
[[autodoc]] tokenizers.pre_tokenizers.Digits
## Metaspace
[[autodoc]] tokenizers.pre_tokenizers.Metaspace
## PreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.PreTokenizer
## Punctuation
[[autodoc]] tokenizers.pre_tokenizers.Punctuation
## Sequence
[[autodoc]] tokenizers.pre_tokenizers.Sequence
## Split
[[autodoc]] tokenizers.pre_tokenizers.Split
## UnicodeScripts
[[autodoc]] tokenizers.pre_tokenizers.UnicodeScripts
## Whitespace
[[autodoc]] tokenizers.pre_tokenizers.Whitespace
## WhitespaceSplit
[[autodoc]] tokenizers.pre_tokenizers.WhitespaceSplit
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/normalizers.mdx | # Normalizers
<tokenizerslangcontent>
<python>
## BertNormalizer
[[autodoc]] tokenizers.normalizers.BertNormalizer
## Lowercase
[[autodoc]] tokenizers.normalizers.Lowercase
## NFC
[[autodoc]] tokenizers.normalizers.NFC
## NFD
[[autodoc]] tokenizers.normalizers.NFD
## NFKC
[[autodoc]] tokenizers.normalizers.NFKC
## NFKD
[[autodoc]] tokenizers.normalizers.NFKD
## Nmt
[[autodoc]] tokenizers.normalizers.Nmt
## Normalizer
[[autodoc]] tokenizers.normalizers.Normalizer
## Precompiled
[[autodoc]] tokenizers.normalizers.Precompiled
## Replace
[[autodoc]] tokenizers.normalizers.Replace
## Sequence
[[autodoc]] tokenizers.normalizers.Sequence
## Strip
[[autodoc]] tokenizers.normalizers.Strip
## StripAccents
[[autodoc]] tokenizers.normalizers.StripAccents
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/encode-inputs.mdx | # Encode Inputs
<tokenizerslangcontent>
<python>
These types represent all the different kinds of input that a [`~tokenizers.Tokenizer`] accepts
when using [`~tokenizers.Tokenizer.encode_batch`].
## TextEncodeInput[[[[tokenizers.TextEncodeInput]]]]
<code>tokenizers.TextEncodeInput</code>
Represents a textual input for encoding. Can be either:
- A single sequence: [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
- A pair of sequences:
- A Tuple of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
- Or a List of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence) of size 2
alias of `Union[str, Tuple[str, str], List[str]]`.
## PreTokenizedEncodeInput[[[[tokenizers.PreTokenizedEncodeInput]]]]
<code>tokenizers.PreTokenizedEncodeInput</code>
Represents a pre-tokenized input for encoding. Can be either:
- A single sequence: [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
- A pair of sequences:
- A Tuple of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
- Or a List of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence) of size 2
alias of `Union[List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.
## EncodeInput[[[[tokenizers.EncodeInput]]]]
<code>tokenizers.EncodeInput</code>
Represents all the possible types of input for encoding. Can be:
- When `is_pretokenized=False`: [TextEncodeInput](#tokenizers.TextEncodeInput)
- When `is_pretokenized=True`: [PreTokenizedEncodeInput](#tokenizers.PreTokenizedEncodeInput)
alias of `Union[str, Tuple[str, str], List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/.eslintrc.json | {
"root": true,
"env": {
"es6": true,
"node": true
},
"extends": [
"eslint:recommended",
"plugin:prettier/recommended"
],
"globals": {
"Atomics": "readonly",
"SharedArrayBuffer": "readonly"
},
"parser": "@typescript-eslint/parser",
"parserOptions": {
"ecmaVersion": 2019,
"sourceType": "module"
},
"plugins": ["@typescript-eslint", "jest", "prettier", "simple-import-sort"],
"rules": {
"@typescript-eslint/no-use-before-define": ["error", { "functions": false }],
"simple-import-sort/sort": "error"
},
"overrides": [
{
"files": "**/*.ts",
"plugins": ["jsdoc"],
"extends": [
"plugin:@typescript-eslint/recommended",
"plugin:jest/recommended",
"plugin:jest/style",
"prettier/@typescript-eslint"
],
"rules": {
"jsdoc/no-types": "error"
}
}
]
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/CHANGELOG.md | ## [0.13.2]
- Python only changes.
## [0.13.1]
- [#1072] Fixing Roberta type ids.
## [0.13.0]
- [#1008] `Decoder` is now a composable trait, but without being backward incompatible
- [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible
## [0.12.1]
- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520
## [0.12.0] YANKED
Bump minor version because of a breaking change.
Using `0.12` to match other bindings.
- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking.
- [#961] Added link for Ruby port of `tokenizers`
# [0.8.0](https://github.com/huggingface/tokenizers/compare/node-v0.7.0...node-v0.8.0) (2021-09-02)
### BREAKING CHANGES
- Many improvements on the Trainer ([#519](https://github.com/huggingface/tokenizers/pull/519)).
The files must now be provided first when calling `tokenizer.train(files, trainer)`.
### Features
- Adding the `TemplateProcessing`
- Add `WordLevel` and `Unigram` models ([#490](https://github.com/huggingface/tokenizers/pull/490))
- Add `nmtNormalizer` and `precompiledNormalizer` normalizers ([#490](https://github.com/huggingface/tokenizers/pull/490))
- Add `templateProcessing` post-processor ([#490](https://github.com/huggingface/tokenizers/pull/490))
- Add `digitsPreTokenizer` pre-tokenizer ([#490](https://github.com/huggingface/tokenizers/pull/490))
- Add support for mapping to sequences ([#506](https://github.com/huggingface/tokenizers/pull/506))
- Add `splitPreTokenizer` pre-tokenizer ([#542](https://github.com/huggingface/tokenizers/pull/542))
- Add `behavior` option to the `punctuationPreTokenizer` ([#657](https://github.com/huggingface/tokenizers/pull/657))
- Add the ability to load tokenizers from the Hugging Face Hub using `fromPretrained` ([#780](https://github.com/huggingface/tokenizers/pull/780))
### Fixes
- Fix a bug where long tokenizer.json files would be incorrectly deserialized ([#459](https://github.com/huggingface/tokenizers/pull/459))
- Fix RobertaProcessing deserialization in PostProcessorWrapper ([#464](https://github.com/huggingface/tokenizers/pull/464))
# [0.7.0](https://github.com/huggingface/tokenizers/compare/node-v0.6.2...node-v0.7.0) (2020-07-01)
### BREAKING CHANGES
- `robertaProcessing` now handles trimming the offsets (activated by default) ([#236](https://github.com/huggingface/tokenizers/pull/236))
- `charToTokenOffsets`, `charToWordOffsets` and `tokenToWordOffsets` helper functions on `Encoding` instances are removed and replaced by new `wordToTokens`, `wordToChars`, `tokenToChars`, `tokenToWord` and `charToWord` methods ([#234](https://github.com/huggingface/tokenizers/pull/234))
- `encode` and `encodeBatch` methods on a tokenizer now handle pre-tokenized inputs and have their signatures changed ([#249](https://github.com/huggingface/tokenizers/pull/249)). In addition:
- `encodeTokenized`, `encodeTokenizedBatch` methods are therefore removed
- `InputSequence`, `EncodeInput` and `EncodeOptions` types are added
- Improve management of the additional vocabulary ([#309](https://github.com/huggingface/tokenizers/pull/309)):
- New parameter `normalized` in `AddedToken` options, controlling whether a token should be extracted from the normalized version of the input text
- The `AddedToken` constructor now takes a `special` boolean as second parameter to indicate if the token is special (in this case it won't be normalized)
### Features
- Serialization of a `Tokenizer` and all its parts (`PreTokenizer`, `Normalizer`, ...). This adds some methods to easily save/load an entire tokenizer: new static methods `fromString` / `fromFile`, and instance methods `save` / `toString` on `BaseTokenizer` ([#272](https://github.com/huggingface/tokenizers/pull/272))
- New `padToMultipleOf` parameter for `PaddingOptions`, to pad to a multiple of a specified value ([#289](https://github.com/huggingface/tokenizers/pull/289))
- Improved errors generated during truncation when the provided max length is too low ([02cc977](https://github.com/huggingface/tokenizers/commit/02cc97756ffb9193b5d6d8dfcdeb7bf08adf2516))
- Improve BPE training speeds, by reading files sequentially, but parallelizing the processing of each file ([#276](https://github.com/huggingface/tokenizers/pull/276))
- Use `onig` for byte-level pre-tokenization to remove all the differences with the original implementation from GPT-2 ([#280](https://github.com/huggingface/tokenizers/pull/280))
### Fixes
- Fix various crash when training a BPE model ([#286](https://github.com/huggingface/tokenizers/pull/286))
- Fix a few bugs related to additional vocabulary/tokens ([#309](https://github.com/huggingface/tokenizers/pull/309))
## [0.6.2](https://github.com/huggingface/tokenizers/compare/node-v0.6.1...node-v0.6.2) (2020-04-13)
### Features
- More symbols exposed: `Token`, `BaseTokenizer`, `PaddingConfiguration`, `TruncationConfiguration` ([38d53a7](https://github.com/huggingface/tokenizers/commit/38d53a7b84b2ee86b262eee2de6121351fe03889))
- Expose `setPostProcessor` in `BaseTokenizer` ([38d53a7](https://github.com/huggingface/tokenizers/commit/38d53a7b84b2ee86b262eee2de6121351fe03889))
### Fixes
- Fix the word indexes when there are special tokens ([#226](https://github.com/huggingface/tokenizers/pull/226))
- Fix encoding overflowing offsets ([695ab83](https://github.com/huggingface/tokenizers/commit/695ab8388f5f1a7d63d8aaab9b3762312e0d5ac3))
- Fix Roberta overflowings ([c4ecc6f](https://github.com/huggingface/tokenizers/commit/c4ecc6f7ce7af40c558401a3ec9500732a17f9da))
## [0.6.1](https://github.com/huggingface/tokenizers/compare/node-v0.6.0...node-v0.6.1) (2020-04-01)
### Fixes
- Fix special tokens with wrong id ([b770f36](https://github.com/huggingface/tokenizers/commit/b770f364280af33efeffea8f0003102cda8cf1b7))
- Fix `AddedToken`'s `leftStrip` and `rightStrip` params (thanks @thirdwing) ([85488dd](https://github.com/huggingface/tokenizers/commit/85488dd6330ec7fa64aeb78c1a86b221f77c5ebb))
# [0.6.0](https://github.com/huggingface/tokenizers/compare/node-v0.5.0...node-v0.6.0) (2020-03-30)
### BREAKING CHANGES
- The `getOriginalString` method on `Encoding`s has been removed: this brings a reduction of 70% of the memory footprint. You can use the provided new `slice` function as a replacement to get a subpart of a string according to specified indexes while respecting unicode characters. ([#197](https://github.com/huggingface/tokenizers/pull/197))
- The offsets provided on `Encoding` are now relative to the original string, and not the normalized one anymore ([#197](https://github.com/huggingface/tokenizers/pull/197))
- The added tokens given to `addTokens`, `addSpecialTokens` or `train` methods of a tokenizer can now be instances of `AddedToken` to provide more control over these tokens. The support of the `[string, boolean]` format in `addTokens` method is removed. ([#202](https://github.com/huggingface/tokenizers/pull/202))
- The `addSpecialTokens` option for `BertWordpieceTokenizer` has been removed, and must now be passed to `encode` and `encodeBatch` functions ([7dd2400](https://github.com/huggingface/tokenizers/commit/7dd24002148a452f4d9fc55966e181c2dc699203)) ([#193](https://github.com/huggingface/tokenizers/pull/193))
### Features
- `encode` and `encodeBatch` methods on `BaseTokenizer` now take a new optional argument, specifying whether to add the special tokens (activated by default) ([#193](https://github.com/huggingface/tokenizers/pull/193))
- Methods `decode` and `decodeBatch` exposed in `BaseTokenizer` instances ([#184](https://github.com/huggingface/tokenizers/pull/184))
- The `fromFiles` methods for `BPE` and `WordPiece` models are now `async` ([#184](https://github.com/huggingface/tokenizers/pull/184))
- Big improvements in speed for BPE (both training and tokenization) ([#165](https://github.com/huggingface/tokenizers/pull/165))
- `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated. This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are part of the actual token. It has been added to `ByteLevelBPETokenizer` but it is off by default. ([#188](https://github.com/huggingface/tokenizers/pull/188))
- New `postProcess`, `encodeTokenized`, `encodeTokenizedBatch` and `normalize` methods on `BaseTokenizer` ([#200](https://github.com/huggingface/tokenizers/pull/200)) ([2aeae55](https://github.com/huggingface/tokenizers/commit/2aeae555e22ac58b11b4956aa3f601bb168e8c3f))
- New `mergeEncodings` static method on `Encoding` class ([#200](https://github.com/huggingface/tokenizers/pull/200)) ([0408567](https://github.com/huggingface/tokenizers/commit/0408567f23d938952f45192a3eff54d48f828882))
- New `wordIndexes` getter and new `charToToken`, `charToTokenOffsets`, `charToWordOffsets` and `tokenToWordOffsets` helper functions on `Encoding` instances ([#200](https://github.com/huggingface/tokenizers/pull/200)) ([ce3cf78](https://github.com/huggingface/tokenizers/commit/ce3cf78ea5423d483895f51f77ff0c7df07f9b0a))
### Fixes
- Fix `longest_first` truncation strategy ([#174](https://github.com/huggingface/tokenizers/issues/174))
- Fix options names in `BPE.fromFiles` ([306f427](https://github.com/huggingface/tokenizers/commit/35540d2e0715e88299f8f04f842e23b5a306f427))
- Actually expose `save` method in `Model` ([ddcf8e8](https://github.com/huggingface/tokenizers/commit/3d143a911bde8d15e1431156fe3cf7676ddcf8e8))
- The errors in async functions are now typed ([7aa6c13](https://github.com/huggingface/tokenizers/commit/4510ea5ce37d84754bb782a99353ac5627aa6c13))
- Trim the decoded string in `bpeDecoder` used by `BPETokenizer` ([#205](https://github.com/huggingface/tokenizers/issues/205)) ([3f4a6b7](https://github.com/huggingface/tokenizers/commit/3f4a6b746b921f339de3279d073b29e019ee2e5a))
# [0.5.0](https://github.com/huggingface/tokenizers/compare/node-v0.4.1...node-v0.5.0) (2020-02-27)
### BREAKING CHANGES
- The `Encoding` object now exposes getters instead of `get...` methods (except for `getOriginalString`) ([9179968](https://github.com/huggingface/tokenizers/commit/917996841df2b3385e0212c9d7e9910d4e0d3fbf))
- `BertWordPieceTokenizer` now cleans up some tokenization artifacts by default while decoding ([#145](https://github.com/huggingface/tokenizers/issues/145)) ([#147](https://github.com/huggingface/tokenizers/pull/147))
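Concretely, the getter change looks like this minimal sketch (`fromOptions` usage as in the README):
```ts
import { BertWordPieceTokenizer } from "tokenizers";

// Run inside an async function (or with top-level await enabled).
const tokenizer = await BertWordPieceTokenizer.fromOptions({ vocabFile: "./vocab.txt" });
const encoding = await tokenizer.encode("Hello there!");

// Before 0.5.0: encoding.getIds(), encoding.getTokens(), ...
// From 0.5.0 on, these are plain getters (only getOriginalString stays a method):
console.log(encoding.ids);
console.log(encoding.tokens);
console.log(encoding.length); // new `length` property (see Features below)
```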
### Features
- `Encoding` exposes a new `length` property ([9179968](https://github.com/huggingface/tokenizers/commit/917996841df2b3385e0212c9d7e9910d4e0d3fbf))
- Add a new `stripNormalizer` ([#140](https://github.com/huggingface/tokenizers/pull/140)) ([815d743](https://github.com/huggingface/tokenizers/commit/815d743461f9067ab38237862b7be8114d422300))
- `ByteLevelBPETokenizer` and `BPETokenizer` accept more options ([946ac1a](https://github.com/huggingface/tokenizers/commit/946ac1a9517c3090064e9a972ad71a5cf25b7e7f))
- Add `save` method to `Model` class ([aebc97e](https://github.com/huggingface/tokenizers/commit/aebc97eaf34260c9ed7689dd5e087bf8c8af59fc))
- Improved padding performance ([b30be3b](https://github.com/huggingface/tokenizers/commit/b30be3b2bda977b65f9bdb384258829b2bd91e3d)) ([0dc857e](https://github.com/huggingface/tokenizers/commit/0dc857ea8c557532a52628a6bc80141e65e6d974))
### Fixes
- Methods accepting optional arguments now handle explicit `undefined` correctly ([0fe22a7](https://github.com/huggingface/tokenizers/commit/0fe22a7c1c23f8d992f502a3a582e5212b8281ac))
- Special tokens are now declared only if present in the vocabulary ([b70283c](https://github.com/huggingface/tokenizers/commit/b70283c3050056958e8ba020b0386451cc6df80c))
- Add missing mask/padding special tokens in wordpiece tokenizer ([b70283c](https://github.com/huggingface/tokenizers/commit/b70283c3050056958e8ba020b0386451cc6df80c))
- Fix a bug in `ByteLevelBPETokenizer` that caused offsets to be wrong when a character was split into multiple bytes ([#156](https://github.com/huggingface/tokenizers/pull/156))
## [0.4.1](https://github.com/huggingface/tokenizers/compare/node-v0.4.0...node-v0.4.1) (2020-02-11)
### Fixes
- Fix punctuation in `BertWordPieceTokenizer` (thanks to @Mansterteddy in [#134](https://github.com/huggingface/tokenizers/pull/134))
# [0.4.0](https://github.com/huggingface/tokenizers/compare/node-v0.3.1...node-v0.4.0) (2020-02-05)
### BREAKING CHANGES
- `getOverflowing()` method on `Encoding` now returns all the overflowing `Encoding`s at once ([#77](https://github.com/huggingface/tokenizers/pull/77)) ([0094393](https://github.com/huggingface/tokenizers/commit/0094393610623bafc269790cd1be81fd1474583a))
### Features
- Add `setTruncation`, `disableTruncation`, `setPadding` and `disablePadding` methods in `Tokenizer` and `BaseTokenizer` (see the sketch after this list) ([#109](https://github.com/huggingface/tokenizers/pull/109)) ([78e2690](https://github.com/huggingface/tokenizers/commit/78e26905a735e14e67590cb09ddb42ed141c455b))
- Expose tokenizer / truncation / padding configuration in `BaseTokenizer` ([#126](https://github.com/huggingface/tokenizers/pull/126)) ([cb8585b](https://github.com/huggingface/tokenizers/commit/cb8585bc4eb8037c52049da677e4791857231f03))
- Expose `addTokens`, `addSpecialTokens`, `idToToken` and `tokenToId` in `BaseTokenizer` ([7051480](https://github.com/huggingface/tokenizers/commit/7051480c333f88bef80aa6846b66032a2d47383c))
- Add `getOriginalString()` method on `Encoding` ([a14c633](https://github.com/huggingface/tokenizers/commit/a14c63343b217a2c501359bec52baf717e3a05ef))
- Add `charDelimiterSplitPreTokenizer`: a new `PreTokenizer` that allows splitting sequences on the given delimiter (works like `.split(delimiter)`) ([#114](https://github.com/huggingface/tokenizers/pull/114)) ([6165910](https://github.com/huggingface/tokenizers/commit/6165910ca66b6bfd9fd996aa38c4c0b2b6505953))
- Add `robertaProcessing` as a new `PostProcessor` ([#111](https://github.com/huggingface/tokenizers/pull/111)) ([6524f09](https://github.com/huggingface/tokenizers/commit/6524f09e991c3a52c839d8eb01bfa41e81fde1d1))
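A sketch of the new configuration methods; the option names (`strategy`, `maxLength`) are assumptions based on the entries above, so check the typings for the exact shapes:
```ts
import { BertWordPieceTokenizer } from "tokenizers";

// Run inside an async function (or with top-level await enabled).
const tokenizer = await BertWordPieceTokenizer.fromOptions({ vocabFile: "./vocab.txt" });

// Configure truncation and padding once, then encode as usual.
tokenizer.setTruncation(128, { strategy: "longest_first" });
tokenizer.setPadding({ maxLength: 128 });

const encoding = await tokenizer.encode("A first sequence", "And a second one");
console.log(encoding.tokens);

// Both settings can be reverted at any time.
tokenizer.disableTruncation();
tokenizer.disablePadding();
```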
### Fixes
- Correctly truncate with `OnlyFirst` and `OnlySecond` strategies ([#108](https://github.com/huggingface/tokenizers/issues/108)) ([6d532fe](https://github.com/huggingface/tokenizers/commit/6d532fedb1d3626328828304a5c39807733d2fa1))
- Fix default special tokens in `BertWordPieceTokenizer` ([10e2d28](https://github.com/huggingface/tokenizers/commit/10e2d286caf517f0977c04cf8e1924aed90403c9))
- Fix return type of `getSpecialTokensMask` on `Encoding` ([9770be5](https://github.com/huggingface/tokenizers/commit/9770be566175dc9c44dd7dcaa00a57d0e4ca632b))
- Actually add special tokens in tokenizers implementations ([acef252](https://github.com/huggingface/tokenizers/commit/acef252dacc43adc414175cfc325668ad1488753))
[#1072]: https://github.com/huggingface/tokenizers/pull/1072
[#956]: https://github.com/huggingface/tokenizers/pull/956
[#1008]: https://github.com/huggingface/tokenizers/pull/1008
[#1009]: https://github.com/huggingface/tokenizers/pull/1009
[#1047]: https://github.com/huggingface/tokenizers/pull/1047
[#1055]: https://github.com/huggingface/tokenizers/pull/1055
[#1051]: https://github.com/huggingface/tokenizers/pull/1051
[#1052]: https://github.com/huggingface/tokenizers/pull/1052
[#938]: https://github.com/huggingface/tokenizers/pull/938
[#939]: https://github.com/huggingface/tokenizers/pull/939
[#952]: https://github.com/huggingface/tokenizers/pull/952
[#954]: https://github.com/huggingface/tokenizers/pull/954
[#962]: https://github.com/huggingface/tokenizers/pull/962
[#961]: https://github.com/huggingface/tokenizers/pull/961
[#960]: https://github.com/huggingface/tokenizers/pull/960
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/package.json | {
"name": "tokenizers",
"version": "0.13.3",
"description": "",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
"repository": {
"type": "git",
"url": "git+https://github.com/huggingface/tokenizers.git"
},
"bugs": {
"url": "https://github.com/huggingface/tokenizers/issues"
},
"homepage": "https://github.com/huggingface/tokenizers/tree/master/bindings/node",
"author": "Anthony MOI <[email protected]>",
"license": "Apache-2.0",
"dependencies": {
"@types/node": "^13.13.52",
"native": "^0.3.3",
"node-pre-gyp": "^0.14.0",
"package.json": "^2.0.1"
},
"devDependencies": {
"@types/jest": "^26.0.24",
"@typescript-eslint/eslint-plugin": "^3.10.1",
"@typescript-eslint/parser": "^3.10.1",
"eslint": "^7.32.0",
"eslint-config-prettier": "^6.15.0",
"eslint-plugin-jest": "^23.20.0",
"eslint-plugin-jsdoc": "^30.7.13",
"eslint-plugin-prettier": "^3.4.1",
"eslint-plugin-simple-import-sort": "^5.0.3",
"jest": "^26.6.3",
"neon-cli": "^0.9.1",
"prettier": "^2.5.1",
"shelljs": "^0.8.3",
"ts-jest": "^26.5.6",
"typescript": "^3.9.10"
},
"engines": {
"node": ">=10 < 11 || >=12 <14"
},
"scripts": {
"dev-ts": "rm -rf dist && tsc -p tsconfig.lib.json && rsync -a $(pwd)/lib/bindings/ dist/bindings/",
"dev-rs": "neon build",
"dev": "npm run dev-rs && npm run dev-ts",
"compile": "neon build --release",
"clean-rs": "neon clean",
"package": "node-pre-gyp package",
"test": "jest && cargo test --manifest-path native/Cargo.toml",
"lint-check": "eslint --ext .js,.ts lib examples",
"lint": "eslint --fix --ext .js,.ts lib examples"
},
"binary": {
"module_name": "index",
"module_path": "./bin-package",
"host": "https://tokenizers-releases.s3.amazonaws.com/node",
"package_name": "{module_name}-v{version}-{node_abi}-{platform}-{arch}-{libc}.tar.gz",
"remote_path": "{version}"
}
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
</p>
<p align="center">
<a href="https://badge.fury.io/js/tokenizers">
<img alt="Build" src="https://badge.fury.io/js/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
NodeJS implementation of today's most used tokenizers, with a focus on performance and
versatility. Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the high-level design, you can check it out there.
## Main features
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignment tracking. It's always possible to get the part of the original sentence that corresponds to a given token.
- Does all the pre-processing: truncation, padding, and adding the special tokens your model needs.
## Installation
```bash
npm install tokenizers@latest
```
## Basic example
```ts
import { BertWordPieceTokenizer } from "tokenizers";
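// Run inside an async function (or with top-level await enabled).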
const wordPieceTokenizer = await BertWordPieceTokenizer.fromOptions({ vocabFile: "./vocab.txt" });
const wpEncoded = await wordPieceTokenizer.encode("Who is John?", "John is a teacher");
console.log(wpEncoded.length);
console.log(wpEncoded.tokens);
console.log(wpEncoded.ids);
console.log(wpEncoded.attentionMask);
console.log(wpEncoded.offsets);
console.log(wpEncoded.overflowing);
console.log(wpEncoded.specialTokensMask);
console.log(wpEncoded.typeIds);
console.log(wpEncoded.wordIndexes);
```
## Provided Tokenizers
- `BPETokenizer`: The original BPE
- `ByteLevelBPETokenizer`: The byte level version of the BPE
- `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
- `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece
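For example, training one of these on your own data might look like the sketch below; the no-argument `fromOptions()`, the `train` signature, and the `vocabSize` option name are assumptions, so check the shipped typings for the exact API:
```ts
import { ByteLevelBPETokenizer } from "tokenizers";

// Run inside an async function; the corpus path is hypothetical.
const tokenizer = await ByteLevelBPETokenizer.fromOptions();
await tokenizer.train(["./data/corpus.txt"], { vocabSize: 30000 });

const encoded = await tokenizer.encode("Training new vocabularies is fast!");
console.log(encoded.tokens);
```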
## License
[Apache License 2.0](../../LICENSE)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/build.js | #!/usr/bin/env node
/**
* Inspired by https://github.com/IronCoreLabs/recrypt-node-binding
* ==================================
*
 * This script is responsible for compiling and building the NPM release bundle for this repo. Depending on the option passed, the following steps are taken:
*
* + Clean up any existing Rust builds by running `cargo clean`.
* + Run `cargo update` to make sure all dependencies are available.
* + Compile rust code into index.node file.
 * + Move all expected content into a `dist` directory.
 * + Generate a binary distribution in `bin-package`.
 * + Publish the package on npm when the `--npm-publish` option is provided.
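 *
 * Usage: node build.js [--all | --rust | --typescript | --package-rust | --npm-publish]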
*/
const fs = require("fs");
const path = require("path");
const shell = require("shelljs");
const distPath = "./dist";
// Fail this script if any of these commands fail
shell.set("-e");
// Ensure that our directory is set to the root of the repo
const rootDirectory = path.dirname(process.argv[1]);
shell.cd(rootDirectory);
run()
  // Prevent "unhandledRejection" events, allowing the script to actually exit with an error
.catch(() => process.exit(1));
/***************************************/
async function run() {
const arg = process.argv.slice(2)[0];
switch (arg) {
case "--all":
buildRust();
buildTs();
break;
case "--rust":
buildRust();
break;
case "--typescript":
buildTs();
break;
case "--package-rust":
buildRust();
await packageRust();
break;
case "--npm-publish":
buildTs();
      await npmPublish();
break;
default:
shell.echo("No arg provided, doing nothing...");
break;
}
}
function buildRust() {
shell.echo("BUILDING RUST...");
// Cleanup the previous build, if it exists
shell.rm("-rf", "./bin-package");
shell.rm("-rf", "./build");
// Cleanup any previous Rust builds, update deps, and compile
shell.exec("npm ci --ignore-scripts");
shell.exec("npm run clean-rs");
shell.pushd("./native");
shell.exec("cargo update");
shell.popd();
shell.exec("npm run compile");
shell.echo("BUILDING RUST COMPLETE...");
}
async function packageRust() {
shell.echo("PACKAGING RUST...");
shell.mkdir("./bin-package");
shell.cp("./native/index.node", "./bin-package");
shell.exec("npm run package");
const version = JSON.parse(await fs.promises.readFile("./package.json")).version;
const tarPath = `build/stage/${version}`;
const tgz = (await fs.promises.readdir(tarPath)).find(f => f.endsWith(".tar.gz"));
shell.cp(`${tarPath}/${tgz}`, "./bin-package/");
shell.echo("PACKAGING RUST COMPLETE...");
}
function buildTs() {
shell.echo("BUILDING TS...");
// Cleanup the previous build, if it exists
shell.rm("-rf", distPath);
shell.exec("npm ci --ignore-scripts");
shell.mkdir(distPath);
shell.exec("npx tsc -p tsconfig.prod.json");
shell.echo("BUILDING TS COMPLETE...");
}
async function npmPublish() {
shell.echo("PUBLISHING ON NPM...");
shell.cp("-ur", ["lib/bindings/**/*.{js,d.ts}"], `${distPath}/bindings/`);
shell.mv([`${distPath}/bindings/native.prod.js`], [`${distPath}/bindings/native.js`]);
// shell.rm("-r", [`${distPath}/**/*.test.ts`]); // No more remaining *.test.ts files for now at this step
shell.cp("-r", ["package.json", "README.md", "../../LICENSE"], distPath);
  // Add an NPM install script to the package.json that we push to NPM so that when consumers pull it down it
// runs the expected node-pre-gyp step.
const npmPackageJson = require(`${distPath}/package.json`);
npmPackageJson.scripts.install = "node-pre-gyp install";
npmPackageJson.main = "./index.js";
npmPackageJson.types = "./index.d.ts";
await fs.promises.writeFile(
`${distPath}/package.json`,
JSON.stringify(npmPackageJson, null, 2)
);
shell.exec(`npm publish ${distPath} --access public`);
shell.echo("PUBLISHING ON NPM COMPLETE...");
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/.prettierrc.json | {
"$schema": "http://json.schemastore.org/prettierrc",
"printWidth": 90
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/.eslintignore | node_modules
dist
coverage
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/tsconfig.prod.json | {
"extends": "./tsconfig.json",
"exclude": [
"./**/*.test.ts"
],
"compilerOptions": {
"sourceMap": false
}
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/jest.config.js | /* eslint-disable prettier/prettier */
// For a detailed explanation regarding each configuration property, visit:
// https://jestjs.io/docs/en/configuration.html
module.exports = {
// All imported modules in your tests should be mocked automatically
// automock: false,
// Stop running tests after `n` failures
// bail: 0,
// Respect "browser" field in package.json when resolving modules
// browser: false,
// The directory where Jest should store its cached dependency information
// cacheDirectory: "/private/var/folders/y_/n6h0fkqn3m57bg_ktk25j7rm0000gn/T/jest_dx",
// Automatically clear mock calls and instances between every test
// clearMocks: false,
// Indicates whether the coverage information should be collected while executing the test
// collectCoverage: false,
// An array of glob patterns indicating a set of files for which coverage information should be collected
// collectCoverageFrom: null,
// The directory where Jest should output its coverage files
// coverageDirectory: null,
// An array of regexp pattern strings used to skip coverage collection
// coveragePathIgnorePatterns: [
// "/node_modules/"
// ],
// A list of reporter names that Jest uses when writing coverage reports
// coverageReporters: [
// "json",
// "text",
// "lcov",
// "clover"
// ],
// An object that configures minimum threshold enforcement for coverage results
// coverageThreshold: null,
// A path to a custom dependency extractor
// dependencyExtractor: null,
// Make calling deprecated APIs throw helpful error messages
// errorOnDeprecated: false,
// Force coverage collection from ignored files using an array of glob patterns
// forceCoverageMatch: [],
// A path to a module which exports an async function that is triggered once before all test suites
// globalSetup: null,
// A path to a module which exports an async function that is triggered once after all test suites
// globalTeardown: null,
// A set of global variables that need to be available in all test environments
// globals: {},
// The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers.
// maxWorkers: "50%",
// An array of directory names to be searched recursively up from the requiring module's location
// moduleDirectories: [
// "node_modules"
// ],
// An array of file extensions your modules use
// moduleFileExtensions: [
// "js",
// "json",
// "jsx",
// "ts",
// "tsx",
// "node"
// ],
// A map from regular expressions to module names that allow to stub out resources with a single module
// moduleNameMapper: {},
// An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader
// modulePathIgnorePatterns: [],
// Activates notifications for test results
// notify: false,
// An enum that specifies notification mode. Requires { notify: true }
// notifyMode: "failure-change",
// A preset that is used as a base for Jest's configuration
preset: "ts-jest",
// Run tests from one or more projects
// projects: null,
// Use this configuration option to add custom reporters to Jest
// reporters: undefined,
// Automatically reset mock state between every test
// resetMocks: false,
// Reset the module registry before running each individual test
// resetModules: false,
// A path to a custom resolver
// resolver: null,
// Automatically restore mock state between every test
// restoreMocks: false,
// The root directory that Jest should scan for tests and modules within
// rootDir: null,
// A list of paths to directories that Jest should use to search for files in
// roots: [
// "<rootDir>"
// ],
// Allows you to use a custom runner instead of Jest's default test runner
// runner: "jest-runner",
// The paths to modules that run some code to configure or set up the testing environment before each test
// setupFiles: [],
// A list of paths to modules that run some code to configure or set up the testing framework before each test
// setupFilesAfterEnv: [],
// A list of paths to snapshot serializer modules Jest should use for snapshot testing
// snapshotSerializers: [],
// The test environment that will be used for testing
testEnvironment: "node",
// Options that will be passed to the testEnvironment
// testEnvironmentOptions: {},
// Adds a location field to test results
// testLocationInResults: false,
// The glob patterns Jest uses to detect test files
// testMatch: [
// "**/__tests__/**/*.[jt]s?(x)",
// "**/?(*.)+(spec|test).[tj]s?(x)"
// ],
// An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
testPathIgnorePatterns: [
"/node_modules/",
"/dist/"
],
// The regexp pattern or array of patterns that Jest uses to detect test files
// testRegex: [],
// This option allows the use of a custom results processor
// testResultsProcessor: null,
// This option allows use of a custom test runner
// testRunner: "jasmine2",
// This option sets the URL for the jsdom environment. It is reflected in properties such as location.href
// testURL: "http://localhost",
// Setting this value to "fake" allows the use of fake timers for functions such as "setTimeout"
// timers: "real",
// A map from regular expressions to paths to transformers
// transform: null,
// An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
// transformIgnorePatterns: [
// "/node_modules/"
// ],
// An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
// unmockedModulePathPatterns: undefined,
// Indicates whether each individual test should be reported during the run
// verbose: null,
// An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode
watchPathIgnorePatterns: [
"<rootDir>/node_modules/",
"<rootDir>/native/",
"<rootDir>/dist/",
"<rootDir>/build/"
],
// Whether to use watchman for file crawling
// watchman: true,
};
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/tsconfig.json | {
"include": [
"./lib"
],
"compilerOptions": {
/* Basic Options */
// "incremental": true, /* Enable incremental compilation */
"target": "ES2017", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019' or 'ESNEXT'. */
"module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', or 'ESNext'. */
"lib": [], /* Specify library files to be included in the compilation. */
// "allowJs": true, /* Allow javascript files to be compiled. */
// "checkJs": true, /* Report errors in .js files. */
// "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */
"declaration": true, /* Generates corresponding '.d.ts' file. */
// "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */
"sourceMap": true, /* Generates corresponding '.map' file. */
// "outFile": "./", /* Concatenate and emit output to single file. */
"outDir": "./dist", /* Redirect output structure to the directory. */
"rootDir": "./lib", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */
// "composite": true, /* Enable project compilation */
// "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */
// "removeComments": true, /* Do not emit comments to output. */
// "noEmit": true, /* Do not emit outputs. */
// "importHelpers": true, /* Import emit helpers from 'tslib'. */
// "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */
// "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */
/* Strict Type-Checking Options */
"strict": true, /* Enable all strict type-checking options. */
// "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */
// "strictNullChecks": true, /* Enable strict null checks. */
// "strictFunctionTypes": true, /* Enable strict checking of function types. */
// "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */
// "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */
// "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */
// "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */
/* Additional Checks */
// "noUnusedLocals": true, /* Report errors on unused locals. */
// "noUnusedParameters": true, /* Report errors on unused parameters. */
// "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */
// "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */
/* Module Resolution Options */
"moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */
// "baseUrl": "./", /* Base directory to resolve non-absolute module names. */
// "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */
// "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */
// "typeRoots": [], /* List of folders to include type definitions from. */
// "types": [], /* Type declaration files to be included in compilation. */
// "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */
"esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */
// "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
/* Source Map Options */
// "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
// "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */
// "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */
/* Experimental Options */
// "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */
// "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */
}
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/node/tsconfig.lib.json | {
"extends": "./tsconfig.json",
"exclude": [
"./**/*.test.ts"
]
}
| 0 |