import { useEffect, useRef, useState } from 'react';
import { CONFIG } from '../config';
import {
getBlogPrompt,
getPromptGeneratePodcastScript,
} from '../utils/prompts';
import { EXAMPLES } from '../examples';
import { HfInference } from '@huggingface/inference';
import { isBlogMode } from '../utils/utils';
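
// ScriptMaker implements Step 1 of the app: it takes the user's source text
// and an optional note, asks an LLM (through Hugging Face Inference
// Providers) to write a podcast script as a YAML block, and streams the
// model's <think> reasoning into the UI while the script is generated.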
interface SplitContent {
thought: string;
codeBlock: string;
}
// Return the substring between the first occurrence of `from` and the next
// occurrence of `to`. If `from` is absent, return ''. If `to` is absent
// (e.g. the closing delimiter is still being streamed), return everything
// after `from`.
const getFromTo = (content: string, from: string, to: string): string => {
  const start = content.indexOf(from);
  if (start === -1) {
    return '';
  }
  const rest = content.substring(start + from.length);
  const end = rest.indexOf(to);
  return end === -1 ? rest : rest.substring(0, end);
};
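// Split the raw streamed completion into the model's <think> reasoning and
// the ```yaml script block that Step 2 consumes.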
const splitContent = (content: string): SplitContent => {
return {
thought: getFromTo(content, '<think>', '</think>').trim(),
codeBlock: getFromTo(content, '```yaml', '```').trim(),
};
};
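// For example (illustrative only; the actual YAML schema is defined by
// getPromptGeneratePodcastScript), a partial streamed response such as
//   '<think>Plan the intro</think>\n```yaml\n- speaker: 1\n  text: Hi'
// splits into thought 'Plan the intro' and codeBlock '- speaker: 1\n  text: Hi'
// even before the closing ``` fence has arrived.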
export const ScriptMaker = ({
setScript,
setBlogURL,
setBusy,
busy,
hfToken,
}: {
setScript: (script: string) => void;
setBlogURL: (url: string) => void;
setBusy: (busy: boolean) => void;
busy: boolean;
hfToken: string;
}) => {
const [model, setModel] = useState<string>(CONFIG.inferenceProviderModels[0]);
const [customModel, setCustomModel] = useState<string>(
CONFIG.inferenceProviderModels[0]
);
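  // Resolve the model id actually sent to the API.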
const usingModel = model === 'custom' ? customModel : model;
const [input, setInput] = useState<string>('');
const [note, setNote] = useState<string>(isBlogMode ? getBlogPrompt() : '');
const [thought, setThought] = useState<string>('');
const [isGenerating, setIsGenerating] = useState<boolean>(false);
const refThought = useRef<HTMLTextAreaElement | null>(null);
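  // Mirror the local generating flag into the parent's busy state so the
  // other steps are disabled while a script is being generated.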
useEffect(() => {
setBusy(isGenerating);
}, [isGenerating]);
  useEffect(() => {
    // Auto-scroll the thought box to the bottom on each streamed update; the
    // short delay lets React commit the new value before we measure.
    setTimeout(() => {
      if (refThought.current) {
        refThought.current.scrollTop = refThought.current.scrollHeight;
      }
    }, 10);
  }, [thought]);
const generate = async () => {
setIsGenerating(true);
setThought('');
try {
let responseContent = '';
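      // Stream the completion from the chosen Inference Provider via the HF client.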
const client = new HfInference(hfToken);
const chunks = client.chatCompletionStream({
model: usingModel,
messages: [
{
role: 'user',
content: getPromptGeneratePodcastScript(input, note),
},
],
temperature: 0.3,
stream: true,
provider: CONFIG.inferenceProvider,
});
      for await (const chunk of chunks) {
        // `content` may be undefined on some deltas (e.g. role-only chunks).
        const addedContent = chunk.choices[0]?.delta?.content ?? '';
        responseContent += addedContent;
        // Re-split on every chunk so the thought box and the script update
        // live as the stream arrives.
        const { thought: newThought, codeBlock } = splitContent(responseContent);
        setThought(newThought);
        if (codeBlock.length > 0) {
          setScript(codeBlock);
        }
      }
    } catch (error) {
      console.error(error);
      alert(`ERROR: ${error}`);
    } finally {
      setIsGenerating(false);
    }
    // Kick off Step 2 by programmatically clicking its generate button; the
    // short delay lets React re-enable the button first.
    setTimeout(() => {
      const generatePodcastBtn = document.getElementById(
        'btn-generate-podcast'
      );
      generatePodcastBtn?.click();
    }, 50);
};
return (
<div className="card bg-base-100 w-full shadow-xl">
<div className="card-body">
<h2 className="card-title">Step 1: Input information</h2>
<select
className="select select-bordered w-full"
          disabled={isGenerating || busy}
          defaultValue={-1}
onChange={(e) => {
const idx = parseInt(e.target.value);
const ex = EXAMPLES[idx];
if (ex) {
setInput(ex.input);
setNote(ex.note);
}
}}
>
          <option disabled value={-1}>
Try one of these examples!!
</option>
{EXAMPLES.map((example, index) => (
<option key={index} value={index}>
{example.name}
</option>
))}
</select>
{isBlogMode && (
          <input
            type="text"
            placeholder="Blog URL"
            className="input input-bordered w-full"
            onChange={(e) => setBlogURL(e.target.value)}
          />
)}
<textarea
className="textarea textarea-bordered w-full h-72 p-2"
placeholder="Type your input information here (an article, a document, etc)..."
value={input}
onChange={(e) => setInput(e.target.value)}
disabled={isGenerating || busy}
></textarea>
<textarea
className="textarea textarea-bordered w-full h-24 p-2"
placeholder="Optional note (the theme, tone, etc)..."
value={note}
onChange={(e) => setNote(e.target.value)}
disabled={isGenerating || busy}
></textarea>
<select
className="select select-bordered"
value={model}
onChange={(e) => setModel(e.target.value)}
disabled={isGenerating || busy}
>
{CONFIG.inferenceProviderModels.map((s) => (
<option key={s} value={s}>
{s}
</option>
))}
<option value="custom">Custom</option>
</select>
{model === 'custom' && (
<input
type="text"
placeholder="Use a custom model from HF Hub (must be supported by Inference Providers)"
className="input input-bordered w-full"
value={customModel}
onChange={(e) => setCustomModel(e.target.value)}
/>
)}
{thought.length > 0 && (
<>
<p>Thought process:</p>
<textarea
className="textarea textarea-bordered w-full h-24 p-2"
value={thought}
ref={refThought}
readOnly
></textarea>
</>
)}
<button
className="btn btn-primary mt-2"
onClick={generate}
disabled={isGenerating || busy || input.length < 10}
>
{isGenerating ? (
<>
<span className="loading loading-spinner loading-sm"></span>
Generating...
</>
) : (
'Generate script'
)}
</button>
<div role="alert" className="alert text-sm">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
className="stroke-info h-6 w-6 shrink-0"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth="2"
d="M13 16h-1v-4h-1m1-4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z"
></path>
</svg>
          <span>
            The LLM may generate invalid YAML. If Step 2 fails, re-generate
            the script or add a note asking the model to stick to the YAML
            format.
          </span>
</div>
</div>
</div>
);
};
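
// Usage sketch (hypothetical parent wiring; the actual App component may
// differ, but the props follow the interface above):
//
//   const [script, setScript] = useState<string>('');
//   const [busy, setBusy] = useState<boolean>(false);
//
//   <ScriptMaker
//     setScript={setScript}
//     setBlogURL={setBlogURL}
//     setBusy={setBusy}
//     busy={busy}
//     hfToken={hfToken}
//   />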