```jsx
import useLLM from "@react-llm/headless";

const Loader = () => {
  const { loadingStatus, isReady, init, gpuDevice } = useLLM();

  // Nothing to render once the model is ready or fully downloaded.
  if (isReady) return null;
  if (loadingStatus.progress === 1) return null;

  // Explain why this browser can't run the model.
  if (gpuDevice.unsupportedReason) {
    return (
      <div>
        <p>Sorry, unsupported!</p>
        <p>Reason: {gpuDevice.unsupportedReason}</p>
        <p>
          This project runs models in the browser with WebGPU and only works
          in Google Chrome v113 and above on Desktop with supported GPUs.
          Experimental support may be available for desktop Firefox and
          Safari Tech Preview.
        </p>
      </div>
    );
  }

  // GPU is supported but the model isn't loaded yet: a minimal
  // trigger for init(), which starts the download.
  return (
    <div>
      <button onClick={init}>Load Model</button>
    </div>
  );
};

export default Loader;
```
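The hook reads its state from the library's context provider, so the Loader has to mount beneath it. A minimal sketch, assuming the package exports a `ModelProvider` wrapper as in the project's examples:

```jsx
import { ModelProvider } from "@react-llm/headless";
import Loader from "./Loader";

// ModelProvider supplies the state that useLLM() reads;
// Loader shows download/compatibility status until the model is ready.
const App = () => (
  <ModelProvider>
    <Loader />
  </ModelProvider>
);

export default App;
```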
- Proof of concept using Transformers.js embeddings in a WebGPU-powered LLM chat.
- No data is sent to the server; conversations are cached in local storage.
- WebGPU is only supported in desktop Google Chrome 113 and above (see the detection sketch below).
- Powered by Apache TVM and the MLC Relax runtime.
- The model is Vicuna-7B, trained by LMSys.
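The `gpuDevice.unsupportedReason` value surfaced by the hook presumably amounts to standard WebGPU feature detection, which you can also perform directly. A minimal sketch using the standard `navigator.gpu` API (the exact messages here are illustrative, not the library's):

```js
// Returns null when WebGPU is usable, or a human-readable reason when not.
async function getUnsupportedReason() {
  if (!("gpu" in navigator)) {
    // Browsers without WebGPU (e.g. Chrome < 113) never expose navigator.gpu.
    return "WebGPU is not available in this browser";
  }
  const adapter = await navigator.gpu.requestAdapter();
  if (!adapter) {
    // WebGPU exists, but no compatible GPU adapter was found.
    return "No suitable GPU adapter found";
  }
  return null;
}
```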