import { Message } from '@/types/chat';
import { OpenAIModel } from '@/types/openai';
import { OPENAI_API_HOST } from '../app/const';

import {
  ParsedEvent,
  ReconnectInterval,
  createParser,
} from 'eventsource-parser';

export class LLMError extends Error {
  type: string;
  param: string;
  code: string;

  constructor(message: string, type: string, param: string, code: string) {
    super(message);
    this.name = 'LLMError';
    this.type = type;
    this.param = param;
    this.code = code;
  }
}

export const LLMStream = async (
  model: OpenAIModel,
  systemPrompt: string,
  temperature: number,
  key: string,
  messages: Message[],
) => {
  const url = `${OPENAI_API_HOST}/v1/chat/completions`;
  const res = await fetch(url, {
    headers: {
      'Content-Type': 'application/json',
      // Authenticate with the caller-supplied key.
      Authorization: `Bearer ${key}`,
    },
    method: 'POST',
    body: JSON.stringify({
      // Forward the selected model to the completions endpoint.
      model: model.id,
      messages: [
        {
          role: 'system',
          content: systemPrompt,
        },
        ...messages,
      ],
      max_tokens: 1000,
      temperature,
      stream: true,
    }),
  });

  const encoder = new TextEncoder();
  const decoder = new TextDecoder();

  if (res.status !== 200) {
    const result = await res.json();
    if (result.error) {
      throw new LLMError(
        result.error.message,
        result.error.type,
        result.error.param,
        result.error.code,
      );
    } else {
      throw new Error(`LLM API returned an error: ${res.statusText}`);
    }
  }

  const stream = new ReadableStream({
    async start(controller) {
      // eventsource-parser handles SSE framing (multi-line events, chunks
      // split mid-event), which a naive "\n\n" split does not.
      const onParse = (event: ParsedEvent | ReconnectInterval) => {
        if (event.type === 'event') {
          const data = event.data;

          // The stream signals completion with a literal [DONE] sentinel.
          if (data === '[DONE]') {
            controller.close();
            return;
          }

          try {
            const json = JSON.parse(data);
            if (json.choices[0].finish_reason != null) {
              controller.close();
              return;
            }
            // Streaming chunks carry incremental tokens under `delta`,
            // not `message`.
            const text = json.choices[0].delta?.content ?? '';
            controller.enqueue(encoder.encode(text));
          } catch (e) {
            controller.error(e);
          }
        }
      };

      const parser = createParser(onParse);

      // Feed raw response bytes into the SSE parser as they arrive.
      for await (const chunk of res.body as any) {
        parser.feed(decoder.decode(chunk));
      }
    },
  });

  return stream;
};
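
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the module above): how a caller
// might drain the ReadableStream returned by LLMStream and handle LLMError
// separately from transport errors. The model literal, key source, and
// message below are hypothetical placeholders, and `process.stdout` assumes
// a Node runtime.
// ---------------------------------------------------------------------------
export const exampleConsumeLLMStream = async () => {
  try {
    const stream = await LLMStream(
      // Hypothetical model shape; fill in your own OpenAIModel fields.
      { id: 'gpt-3.5-turbo', name: 'GPT-3.5' } as OpenAIModel,
      'You are a helpful assistant.',
      0.7,
      process.env.OPENAI_API_KEY ?? '', // hypothetical key source
      [{ role: 'user', content: 'Hello!' } as Message],
    );

    const reader = stream.getReader();
    const decoder = new TextDecoder();

    // Drain the stream, decoding each Uint8Array chunk back into text.
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      process.stdout.write(decoder.decode(value));
    }
  } catch (e) {
    if (e instanceof LLMError) {
      // API-level error: carries the provider's type/param/code fields.
      console.error(`LLM error [${e.code}] (${e.type}): ${e.message}`);
    } else {
      throw e;
    }
  }
};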