"use client"; import React, { useEffect } from "react"; import { CheckCircledIcon, CrossCircledIcon, DotFilledIcon, HamburgerMenuIcon, InfoCircledIcon, } from "@radix-ui/react-icons"; import { Message } from "ai/react"; import { toast } from "sonner"; import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet"; import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger, } from "@/components/ui/tooltip"; import { encodeChat, tokenLimit } from "@/lib/token-counter"; import { basePath, useHasMounted } from "@/lib/utils"; import { Sidebar } from "../sidebar"; import { ChatOptions } from "./chat-options"; interface ChatTopbarProps { chatOptions: ChatOptions; setChatOptions: React.Dispatch>; isLoading: boolean; chatId?: string; setChatId: React.Dispatch>; messages: Message[]; } export default function ChatTopbar({ chatOptions, setChatOptions, isLoading, chatId, setChatId, messages, }: ChatTopbarProps) { const hasMounted = useHasMounted(); const currentModel = chatOptions && chatOptions.selectedModel; const [error, setError] = React.useState(undefined); const fetchData = async () => { if (!hasMounted) { return null; } try { const res = await fetch(basePath + "/api/models"); if (!res.ok) { const errorResponse = await res.json(); const errorMessage = `Connection to vLLM server failed: ${errorResponse.error} [${res.status} ${res.statusText}]`; throw new Error(errorMessage); } const data = await res.json(); // Extract the "name" field from each model object and store them in the state const modelNames = data.data.map((model: any) => model.id); // save the first and only model in the list as selectedModel in localstorage setChatOptions({ ...chatOptions, selectedModel: modelNames[0] }); } catch (error) { setChatOptions({ ...chatOptions, selectedModel: undefined }); toast.error(error as string); } }; useEffect(() => { fetchData(); }, [hasMounted]); if (!hasMounted) { return (
Booting up..
); } const chatTokens = messages.length > 0 ? encodeChat(messages) : 0; return (
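          {/* Connection/model status: a pulsing dot while generating, a green
              check with a model tooltip when ready, a red cross on failure. */}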
          {currentModel !== undefined && (
            <>
              {isLoading ? (
                <DotFilledIcon className="w-4 h-4 text-blue-500" />
              ) : (
                <TooltipProvider>
                  <Tooltip>
                    <TooltipTrigger>
                      <CheckCircledIcon className="w-4 h-4 text-green-500" />
                    </TooltipTrigger>
                    <TooltipContent sideOffset={4}>
                      <p className="text-xs">Current Model</p>
                      <p className="text-xs font-bold">{currentModel}</p>
                    </TooltipContent>
                  </Tooltip>
                </TooltipProvider>
              )}
              <span className="text-xs">
                {isLoading ? "Generating.." : "Ready"}
              </span>
            </>
          )}
          {currentModel === undefined && (
            <>
              <CrossCircledIcon className="w-4 h-4 text-red-500" />
              <span className="text-xs">Connection to vLLM server failed</span>
            </>
          )}
        </div>
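        {/* Token usage: warn when the encoded chat exceeds the context
            budget, and show the running token count. */}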
        <div className="flex items-center gap-2">
          {chatTokens > tokenLimit && (
            <TooltipProvider>
              <Tooltip>
                <TooltipTrigger>
                  <InfoCircledIcon className="w-4 h-4 text-blue-500" />
                </TooltipTrigger>
                <TooltipContent sideOffset={4}>
                  <p className="text-xs">
                    Token limit exceeded. Truncating middle messages.
                  </p>
                </TooltipContent>
              </Tooltip>
            </TooltipProvider>
          )}
          {messages.length > 0 && (
            <span className="text-xs">
              {chatTokens} / {tokenLimit} token{chatTokens === 1 ? "" : "s"}
            </span>
          )}
        </div>
      </div>
    </div>
  );
}
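
// For reference: GET `${basePath}/api/models` is expected to return an
// OpenAI-compatible model list, a shape assumed from the `data.data` mapping
// in fetchData above, e.g.:
//
//   {
//     "object": "list",
//     "data": [{ "id": "meta-llama/Llama-3-8B-Instruct", "object": "model" }]
//   }
//
// A vLLM server exposes one served model per instance, which is why fetchData
// selects modelNames[0] unconditionally.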