```jsx
import useLLM from "@react-llm/headless";

const Loader = () => {
  const { loadingStatus, isReady, init, gpuDevice } = useLLM();

  // Nothing to render once the model is ready or fully downloaded.
  if (isReady) return null;
  if (loadingStatus.progress === 1) return null;

  // Surface the reason when this device cannot run WebGPU.
  if (gpuDevice.unsupportedReason) {
    return (
      <div>
        <p>Sorry, unsupported!</p>
        <p>Reason: {gpuDevice.unsupportedReason}</p>
        <p>
          react-llm runs models in the browser with WebGPU and only works in
          Google Chrome v113 and above on Desktop with supported GPUs.
        </p>
      </div>
    );
  }

  // Otherwise, let the user start downloading and initializing the model.
  return (
    <div>
      <button onClick={init}>Load model</button>
      <p>{Math.round(loadingStatus.progress * 100)}% loaded</p>
    </div>
  );
};

export default Loader;
```
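If you want to probe WebGPU availability yourself, outside of the hook, the browser's standard `navigator.gpu` API is enough. A minimal sketch using only standard Web APIs; the `checkWebGPU` helper name is illustrative and not part of `@react-llm/headless`:

```ts
// Illustrative helper (not part of @react-llm/headless): detect WebGPU support.
async function checkWebGPU(): Promise<string | null> {
  // navigator.gpu is undefined in browsers without WebGPU (e.g. Chrome < 113).
  // In real projects, type this via @webgpu/types instead of casting.
  const gpu = (navigator as any).gpu;
  if (!gpu) {
    return "WebGPU is not available in this browser (desktop Chrome 113+ required).";
  }
  // requestAdapter() resolves to null when no suitable GPU adapter exists.
  const adapter = await gpu.requestAdapter();
  if (!adapter) {
    return "No suitable GPU adapter was found.";
  }
  return null; // null means WebGPU is supported
}
```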
## web-llm-embed
- No data is sent to any server; conversations are cached in local storage.
- WebGPU is currently only supported in desktop Google Chrome 113 and above.
- Powered by Apache TVM and the MLC Relax runtime.
- The model is Vicuna-7B, trained by LMSYS. It can be deleted from Cache Storage (the `tvmjs` cache), as shown in the sketch below.
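To reclaim disk space or reset state manually, the standard Cache Storage and Web Storage APIs suffice. A minimal sketch, assuming the model weights live in a cache named `tvmjs` (as stated above); the exact localStorage keys for conversations depend on the embedding app, so the blunt `localStorage.clear()` is used here:

```ts
// Delete the downloaded model weights from Cache Storage.
// "tvmjs" is the cache name mentioned above; caches.delete() resolves to
// true only if a cache with that name existed and was removed.
async function clearModelCache(): Promise<void> {
  const removed = await caches.delete("tvmjs");
  console.log(removed ? "Model cache deleted." : "No model cache found.");
}

// Conversations live in localStorage. Clearing all storage for the origin
// is the reliable option when the exact key names are unknown.
function clearConversations(): void {
  localStorage.clear();
}
```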