import { useEffect, useRef, useState } from 'react';
import { HfInference } from '@huggingface/inference';
import { CONFIG } from '../config';
import { EXAMPLES } from '../examples';
import {
  getBlogPrompt,
  getPromptGeneratePodcastScript,
} from '../utils/prompts';
import { isBlogMode } from '../utils/utils';

interface SplitContent {
  thought: string;
  codeBlock: string;
}

// Returns the substring between the first occurrence of `from` and the
// next occurrence of `to`, or '' if `from` is not present yet.
const getFromTo = (content: string, from: string, to: string): string => {
  const firstSplit = content.split(from, 2);
  if (firstSplit[1] !== undefined) {
    const secondSplit = firstSplit[1].split(to, 1);
    return secondSplit[0];
  } else {
    return '';
  }
};

// Extracts the model's reasoning (inside <think>...</think>) and the
// generated script (inside a ```yaml fenced block) from the partial
// response accumulated so far.
const splitContent = (content: string): SplitContent => {
  return {
    thought: getFromTo(content, '<think>', '</think>').trim(),
    codeBlock: getFromTo(content, '```yaml', '```').trim(),
  };
};

export const ScriptMaker = ({
  setScript,
  setBlogURL,
  setBusy,
  busy,
  hfToken,
}: {
  setScript: (script: string) => void;
  setBlogURL: (url: string) => void;
  setBusy: (busy: boolean) => void;
  busy: boolean;
  hfToken: string;
}) => {
  const [model, setModel] = useState(CONFIG.inferenceProviderModels[0]);
  const [customModel, setCustomModel] = useState(
    CONFIG.inferenceProviderModels[0]
  );
  const usingModel = model === 'custom' ? customModel : model;
  const [input, setInput] = useState('');
  const [note, setNote] = useState(isBlogMode ? getBlogPrompt() : '');
  const [thought, setThought] = useState('');
  const [isGenerating, setIsGenerating] = useState(false);
  const refThought = useRef<HTMLTextAreaElement>(null);

  // Propagate the busy state to the parent.
  useEffect(() => {
    setBusy(isGenerating);
  }, [isGenerating, setBusy]);

  // Auto-scroll the thought box to the bottom while tokens stream in.
  useEffect(() => {
    setTimeout(() => {
      if (refThought.current) {
        refThought.current.scrollTop = refThought.current.scrollHeight;
      }
    }, 10);
  }, [thought]);

  const generate = async () => {
    setIsGenerating(true);
    setThought('');
    try {
      let responseContent = '';
      const client = new HfInference(hfToken);
      const chunks = client.chatCompletionStream({
        model: usingModel,
        messages: [
          {
            role: 'user',
            content: getPromptGeneratePodcastScript(input, note),
          },
        ],
        temperature: 0.3,
        stream: true,
        provider: CONFIG.inferenceProvider,
      });
      for await (const chunk of chunks) {
        // Some chunks (e.g. the final one) may carry no delta content.
        responseContent += chunk.choices[0]?.delta?.content ?? '';
        const { thought, codeBlock } = splitContent(responseContent);
        setThought(thought);
        if (codeBlock.length > 0) {
          setScript(codeBlock);
        }
      }
    } catch (error) {
      console.error(error);
      alert(`ERROR: ${error}`);
    } finally {
      setIsGenerating(false);
    }
    // Automatically kick off Step 2 once the script is ready.
    setTimeout(() => {
      const generatePodcastBtn = document.getElementById(
        'btn-generate-podcast'
      );
      generatePodcastBtn?.click();
    }, 50);
  };
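  // The streamed completion is expected to look roughly like this (assumed
  // shape; the exact wording depends on getPromptGeneratePodcastScript and
  // on the model):
  //
  //   <think>Planning the episode: two speakers, intro, three topics...</think>
  //   ```yaml
  //   - speaker: 1
  //     text: Welcome back to the show!
  //   - speaker: 2
  //     text: Today we are talking about...
  //   ```
  //
  // splitContent() feeds the <think> part to the "Thought process" box and
  // passes the fenced YAML to setScript() as it arrives.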
  return (
    <div className="card bg-base-100 w-full shadow-xl">
      <div className="card-body">
        <h2 className="card-title">Step 1: Input information</h2>

        {isBlogMode && (
          <>
            <input
              type="text"
              placeholder="Blog URL"
              className="input input-bordered w-full"
              onChange={(e) => setBlogURL(e.target.value)}
            />
          </>
        )}

        <textarea
          className="textarea textarea-bordered w-full h-36"
          placeholder="Enter the content for the podcast here..."
          value={input}
          onChange={(e) => setInput(e.target.value)}
          disabled={busy}
        ></textarea>

        {/* EXAMPLES is assumed to be an array of example input strings */}
        <button
          className="btn btn-sm"
          onClick={() => setInput(EXAMPLES[0])}
          disabled={busy}
        >
          Load an example input
        </button>

        <textarea
          className="textarea textarea-bordered w-full"
          placeholder="Optional note (tone, language, format hints...)"
          value={note}
          onChange={(e) => setNote(e.target.value)}
          disabled={busy}
        ></textarea>

        <select
          className="select select-bordered w-full"
          value={model}
          onChange={(e) => setModel(e.target.value)}
          disabled={busy}
        >
          {CONFIG.inferenceProviderModels.map((m) => (
            <option key={m} value={m}>
              {m}
            </option>
          ))}
          <option value="custom">Custom model</option>
        </select>

        {model === 'custom' && (
          <input
            type="text"
            placeholder="Custom model ID"
            className="input input-bordered w-full"
            value={customModel}
            onChange={(e) => setCustomModel(e.target.value)}
          />
        )}

        <button
          className="btn btn-primary"
          onClick={generate}
          disabled={busy || input.length === 0}
        >
          {isGenerating ? 'Generating...' : 'Generate script'}
        </button>

        {thought.length > 0 && (
          <>
            <h3 className="font-bold">Thought process:</h3>
            <textarea
              ref={refThought}
              className="textarea textarea-bordered w-full h-36"
              value={thought}
              readOnly
            ></textarea>
          </>
        )}
        <p className="text-sm opacity-70">
          The LLM may generate incorrect YAML. If Step 2 fails, re-generate
          the script or add a note forcing it to follow the YAML format.
        </p>
      </div>
    </div>
  );
};
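
// Usage sketch (hypothetical; for illustration only). ScriptMaker needs
// exactly the five props shown; PodcastGenerator stands in for the Step 2
// component that owns the "btn-generate-podcast" button, and the env
// variable name is an assumption.
//
// const App = () => {
//   const [script, setScript] = useState('');
//   const [blogURL, setBlogURL] = useState('');
//   const [busy, setBusy] = useState(false);
//   return (
//     <>
//       <ScriptMaker
//         setScript={setScript}
//         setBlogURL={setBlogURL}
//         setBusy={setBusy}
//         busy={busy}
//         hfToken={import.meta.env.VITE_HF_TOKEN}
//       />
//       <PodcastGenerator script={script} busy={busy} setBusy={setBusy} />
//     </>
//   );
// };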