Update lib/@randydev/together/qwen-ai.js
lib/@randydev/together/qwen-ai.js (CHANGED)
@@ -1,5 +1,25 @@
 import got from 'got';
 
+import { HUGGING_TOKEN } from "../config.js";
+
+import { InferenceClient } from "@huggingface/inference";
+
+const client = new InferenceClient(HUGGING_TOKEN);
+
+export async function Qwen323NewResponse(message) {
+    const chatCompletion = await client.chatCompletion({
+        provider: "hf-inference",
+        model: "Qwen/Qwen3-235B-A22B",
+        messages: [
+            {
+                role: "user",
+                content: message
+            },
+        ],
+    });
+    return chatCompletion.choices[0].message
+}
+
 export async function CloudFlareQwenTogether(message, { system_prompt = "" } = {}) {
     const run = await got.post(`https://api.cloudflare.com/client/v4/accounts/${process.env['ACCOUNT_ID']}/ai/run/@cf/qwen/qwen1.5-1.8b-chat`, {
         headers: {