import { HfInference } from '@huggingface/inference';

/** A single chat message with its author role (e.g. 'system', 'user', 'assistant'). */
export interface Message {
	role: string;
	content: string;
}

/** Create an HfInference client authenticated with the given Hugging Face access token. */
export function createHfInference(token: string): HfInference {
	return new HfInference(token);
}

/** Prepend the system message to the conversation, skipping it when its content is empty. */
export function prepareRequestMessages(systemMessage: Message, messages: Message[]): Message[] {
	return [...(systemMessage.content.length ? [systemMessage] : []), ...messages];
}

/**
 * Stream a chat completion, invoking onChunk with the accumulated text after each received delta.
 */
export async function handleStreamingResponse(
	hf: HfInference,
	model: string,
	messages: Message[],
	temperature: number,
	maxTokens: number,
	jsonMode: boolean,
	onChunk: (content: string) => void
): Promise<void> {
	let out = '';
	for await (const chunk of hf.chatCompletionStream({
		model: model,
		messages: messages,
		temperature: temperature,
		max_tokens: maxTokens,
		json_mode: jsonMode
	})) {
		if (chunk.choices && chunk.choices.length > 0 && chunk.choices[0]?.delta?.content) {
			out += chunk.choices[0].delta.content;
			onChunk(out);
		}
	}
}

/**
 * Request a chat completion without streaming and return the model's reply message,
 * throwing if the response contains no choices.
 */
export async function handleNonStreamingResponse(
	hf: HfInference,
	model: string,
	messages: Message[],
	temperature: number,
	maxTokens: number,
	jsonMode: boolean
): Promise<Message> {
	const response = await hf.chatCompletion({
		model: model,
		messages: messages,
		temperature: temperature,
		max_tokens: maxTokens,
		json_mode: jsonMode
	});

	if (response.choices && response.choices.length > 0) {
		return response.choices[0].message;
	}
	throw new Error('No response from the model');
}
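
// Usage sketch (not part of the original module): one way these helpers might be wired
// together by a caller. The token, model id, and parameter values below are illustrative
// assumptions, not values taken from this file.
//
// const hf = createHfInference('hf_...your token...');
// const system: Message = { role: 'system', content: 'You are a helpful assistant.' };
// const messages = prepareRequestMessages(system, [{ role: 'user', content: 'Hello!' }]);
//
// // Streaming: onChunk receives the accumulated text after every delta.
// await handleStreamingResponse(
// 	hf,
// 	'meta-llama/Meta-Llama-3-8B-Instruct',
// 	messages,
// 	0.7,
// 	512,
// 	false,
// 	(text) => console.log(text)
// );
//
// // Non-streaming: resolves with the assistant's final message.
// const reply = await handleNonStreamingResponse(
// 	hf,
// 	'meta-llama/Meta-Llama-3-8B-Instruct',
// 	messages,
// 	0.7,
// 	512,
// 	false
// );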