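<!--
	Status footer for the hosted inference widget: shows whether the model is
	loaded on the Inference API (or, for bigscience/bloom, on AzureML), reports
	computation time and hardware, and explains why inference is unavailable
	when it is disabled.
-->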
<script lang="ts">
	import { InferenceDisplayability } from "@huggingface/tasks";
	import { type WidgetProps, type ModelLoadInfo, LoadState, ComputeType } from "../types.js";
	import IconAzureML from "../../../Icons/IconAzureML.svelte";
	import IconInfo from "../../../Icons/IconInfo.svelte";
	export let model: WidgetProps["model"];
	export let computeTime: string = "";
	export let error: string = "";
	export let modelLoadInfo: ModelLoadInfo | undefined = undefined;
	export let modelTooBig = false;
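	// Human-readable status message for each load state of the hosted Inference API.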
	const state = {
		[LoadState.Loadable]: "This model can be loaded on the Inference API on demand.",
		[LoadState.Loaded]: "This model is currently loaded and running on the Inference API.",
		[LoadState.TooBig]:
			"Model is too large to load onto the free Inference API. To try the model, launch it on Inference Endpoints instead.",
		[LoadState.Error]: "⚠️ This model could not be loaded by the Inference API. ⚠️",
	} as const;
	const azureState = {
		[LoadState.Loadable]: "This model can be loaded on AzureML Managed Endpoint",
		[LoadState.Loaded]: "This model is loaded and running on AzureML Managed Endpoint",
		[LoadState.TooBig]:
			"Model is too large to load onto the free Inference API. To try the model, launch it on Inference Endpoints instead.",
		[LoadState.Error]: "⚠️ This model could not be loaded.",
	} as const;
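	// Resolves the status message for the current load state. CPU-loaded models on
	// the Inference API get a special message linking to the hardware partner.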
	function getStatusReport(
		modelLoadInfo: ModelLoadInfo | undefined,
		statuses: Record<LoadState, string>,
		isAzure = false
	): string {
		if (!modelLoadInfo) {
			return "Model state unknown";
		}
		if (modelLoadInfo.compute_type === ComputeType.CPU && modelLoadInfo.state === LoadState.Loaded && !isAzure) {
			return `The model is loaded and running on <a class="hover:underline" href="https://huggingface.co/intel" target="_blank">Intel Xeon 3rd Gen Scalable CPU</a>`;
		}
		return statuses[modelLoadInfo.state];
	}
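	// Name of the hardware the computation ran on; falls back to CPU when the
	// compute type is unknown.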
	function getComputeTypeMsg(): string {
		const computeType = modelLoadInfo?.compute_type ?? ComputeType.CPU;
		if (computeType === ComputeType.CPU) {
			return "Intel Xeon 3rd Gen Scalable CPU";
		}
		return computeType;
	}
</script>
<div class="mt-2">
<div class="text-xs text-gray-400">
{#if model.id === "bigscience/bloom"}
<div class="flex items-baseline">
<div class="flex items-center whitespace-nowrap text-gray-700">
<IconAzureML classNames="mr-1 flex-none" /> Powered by
<a
class="underline hover:text-gray-800"
href="https://azure.microsoft.com/products/machine-learning"
target="_blank">AzureML</a
>
</div>
<div class="border-dotter mx-2 flex flex-1 -translate-y-px border-b border-gray-100" />
<div>
{@html getStatusReport(modelLoadInfo, azureState, true)}
</div>
</div>
		{:else if computeTime}
			Computation time on {getComputeTypeMsg()}: {computeTime}
		{:else if (model.inference === InferenceDisplayability.Yes || model.pipeline_tag === "reinforcement-learning") && !modelTooBig}
			{@html getStatusReport(modelLoadInfo, state)}
		{:else if model.inference === InferenceDisplayability.ExplicitOptOut}
			<span class="text-sm text-gray-500">Inference API has been turned off for this model.</span>
		{:else if model.inference === InferenceDisplayability.CustomCode}
			<span class="text-sm text-gray-500">Inference API does not yet support model repos that contain custom code.</span>
		{:else if model.inference === InferenceDisplayability.LibraryNotDetected}
			<span class="text-sm text-gray-500">
				Unable to determine this model’s library. Check the
				<a class="color-inherit" href="/docs/hub/model-cards#specifying-a-library">
					docs <IconInfo classNames="inline" />
				</a>.
			</span>
		{:else if model.inference === InferenceDisplayability.PipelineNotDetected}
			<span class="text-sm text-gray-500">
				Unable to determine this model’s pipeline type. Check the
				<a class="color-inherit" href="/docs/hub/models-widgets#enabling-a-widget">
					docs <IconInfo classNames="inline" />
				</a>.
			</span>
		{:else if model.inference === InferenceDisplayability.PipelineLibraryPairNotSupported}
			<span class="text-sm text-gray-500">
				Inference API does not yet support {model.library_name} models for this pipeline type.
			</span>
		{:else if modelTooBig}
			<span class="text-sm text-gray-500">
				Model is too large to load onto the free Inference API. To try the model, launch it on <a
					class="underline"
					href="https://ui.endpoints.huggingface.co/new?repository={encodeURIComponent(model.id)}"
					>Inference Endpoints</a
				>
				instead.
			</span>
		{:else}
			<!-- added as a failsafe but this case cannot currently happen -->
			<span class="text-sm text-gray-500">
				Inference API is disabled for an unknown reason. Please open a
				<a class="color-inherit underline" href="/{model.id}/discussions/new">Discussion in the Community tab</a>.
			</span>
		{/if}
	</div>
	{#if error}
		<div class="alert alert-error mt-3">{error}</div>
	{/if}
</div>
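
<!--
	A minimal usage sketch, assuming this file is exported as a WidgetInfo component
	(the component name and the modelLoadInfo values below are illustrative, not
	confirmed by this file):

	<WidgetInfo
		{model}
		computeTime="0.027 s"
		modelLoadInfo={{ compute_type: ComputeType.CPU, state: LoadState.Loaded }}
		modelTooBig={false}
	/>
-->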