import {Post, Topic} from "@/contexts/topics";
import {Settings} from "@/contexts/settings";
import {LogAction} from "@/contexts/log";
// @see https://github.com/openai/openai-node/blob/14784f95797d4d525dafecfd4ec9c7a133540da0/src/resources/chat/completions.ts
type OobaboogaStreamChunk = {
  id: string; // Unique identifier for the chunk
  object: string; // The type of the chunk, e.g., "text_completion.chunk"
  created: number; // Unix timestamp of when the chunk was created
  model: string; // Name or identifier of the model generating the completion
  choices: {
    index: number; // The index of the choice in the completion
    finish_reason: string | null; // Reason why the completion stopped, or null if still in progress
    text: string; // The generated text for this chunk
    logprobs: {
      top_logprobs: Record<string, number>[]; // Log probabilities for the top tokens, as an array of key-value pairs
    };
  }[];
  usage?: {
    prompt_tokens: number; // Number of tokens in the prompt
    completion_tokens: number; // Number of tokens generated in the completion
    total_tokens: number; // Total tokens used
  };
};
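// For reference, a single stream event from an OpenAI-compatible /v1/completions
// endpoint looks roughly like this (illustrative values, not a captured response):
//
//   data: {"id":"cmpl-abc123","object":"text_completion.chunk","created":1700000000,
//     "model":"some-model","choices":[{"index":0,"finish_reason":null,"text":"Hello",
//     "logprobs":{"top_logprobs":[]}}]}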
/** Generates the posts of a topic by streaming a completion from the API. */
export async function generatePosts(settings: Settings, topic: Topic): Promise<Post[]> {
  const rawOutput = await fetchApiWithStream(settings, tokenizeTopic(topic));
  return tokensToPosts(rawOutput);
}
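// Illustrative call site (hypothetical names: `settings` and `topic` come from the
// app's contexts, and `renderPost` is not part of this module):
//
//   const posts = await generatePosts(settings, topic);
//   for (const post of posts) renderPost(post);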
// Sentinel token emitted by the model at the end of each post; the stream is
// stopped once settings.postCount of these have been seen.
const postEndToken = "<|end_of_post|>";
// @see https://github.com/openai/openai-node/issues/18
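// Illustrative shape of the accumulated raw output that tokensToPosts consumes
// (assumed from the sentinel format; the actual post layout depends on the model):
//
//   "First post text<|end_of_post|>Second post text<|end_of_post|>"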
async function fetchApiWithStream(settings: Settings, prompt: string): Promise<string> {
  const postCount = settings.postCount;
  // Used to cancel the request once enough posts have been generated
  const controller = new AbortController();
  const response = await fetch(new URL("/v1/completions", settings.apiURL), {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      prompt,
      temperature: settings.temperature,
      max_tokens: 2000,
      stream: true,
      skip_special_tokens: false, // keep special tokens such as <|end_of_post|> in the output
      // stop: "<|end_of_post|>"
      // top_p: 1,
      // frequency_penalty: 0,
      // presence_penalty: 0,
    }),
    signal: controller.signal,
  });
  if (!response.ok) {
    throw new Error(`Failed to fetch API (${response.status} ${response.statusText}): ${await response.text()}`);
  }
// console.log("Streaming !!!!");
//
// const decoderStream = new TextDecoderStream("utf-8");
// const writer = new WritableStream({
// write(rawChunk: string) {
// // output.innerHTML += chunk;
// const chunk = JSON.parse(rawChunk.trimStart().slice(6)) as OobaboogaStreamChunk; // remove "data: " and parse
// console.log(chunk)
// }
// });
  console.log(`Fetching topic with ${postCount} posts...`);
  let endTokenCount = 0;
  let tokens = ""; // Don't know why, but the first token is skipped
  let finishReason: string | null = null;
  try {
    if (!response.body) throw new Error("Response has no body to stream");
    await response.body.pipeThrough(new TextDecoderStream("utf-8")).pipeTo(new WritableStream({
      write(rawChunk: string) {
        // A chunk can contain multiple SSE lines, one data payload per line
        for (const rawChunkLine of rawChunk.split("\n")) {
          if (!rawChunkLine.startsWith("data:")) continue;
          const payload = rawChunkLine.slice(5).trim(); // strip the "data:" prefix
          if (payload === "[DONE]") continue; // OpenAI-compatible streams signal completion with "data: [DONE]"
          const chunk = JSON.parse(payload) as OobaboogaStreamChunk;
          const text = chunk.choices[0].text;
          tokens += text;
          if (text.includes(postEndToken)) {
            endTokenCount++;
            if (endTokenCount >= postCount) {
              // Enough posts generated: abort the request to end the stream early
              finishReason = "custom_stop";
              controller.abort();
              break;
            }
          } else {
            finishReason = chunk.choices[0].finish_reason;
          }
        }
      },
    }));
  } catch (e) {
    // Aborting the request rejects the pipe with an AbortError, which is expected here;
    // anything else is a genuine failure.
    if (!(e instanceof DOMException && e.name === "AbortError")) {
      throw e;
    }
  }
console.log("Done fetching data")
console.log(`Finish reason: ${finishReason}`)
console.log(`Tokens: ${tokens}`)
return tokens;
} |