Commit · 56c6de1
1 Parent(s): 83476de
Make all LLM requests async using google genai aio client
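In outline, the commit swaps the synchronous chats client for its aio counterpart throughout demo.py. A minimal, self-contained sketch of the pattern this adopts (the model name and API-key setup here are illustrative assumptions, not taken from this Space):

import asyncio

from google import genai
from google.genai.types import GenerateContentConfig, ThinkingConfig

client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment


async def main() -> None:
    # client.aio.chats.create returns an AsyncChat instead of a Chat;
    # create itself is synchronous, only the requests are awaited
    chat = client.aio.chats.create(model="gemini-2.5-flash")  # model name is illustrative
    cfg = GenerateContentConfig(thinking_config=ThinkingConfig(include_thoughts=True))
    # On AsyncChat, send_message_stream is awaited to obtain an async iterator,
    # so other coroutines (e.g. UI handlers) keep running between chunks
    async for chunk in await chat.send_message_stream("Hello", config=cfg):
        if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
            for part in chunk.candidates[0].content.parts:
                if part.text:
                    # part.thought is True for thought summaries when include_thoughts=True
                    prefix = "[thought] " if part.thought else ""
                    print(f"{prefix}{part.text}", end="", flush=True)


asyncio.run(main())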
demo.py CHANGED
@@ -29,7 +29,7 @@ from typing import List, Tuple
 
 import gradio as gr
 from google import genai
-from google.genai.chats import Chat
+from google.genai.chats import Chat, AsyncChat
 from google.genai.types import GenerateContentConfig, ThinkingConfig, UploadFileConfig
 
 from manim_video_generator.video_executor import VideoExecutor  # type: ignore
@@ -66,9 +66,9 @@ class ThinkingStreamPart(StreamPart): pass
 class TextStreamPart(StreamPart): pass
 
 
-def stream_parts(chat, prompt):
+async def stream_parts(chat, prompt):
     cfg = GenerateContentConfig(thinking_config=ThinkingConfig(include_thoughts=True))
-    for chunk in chat.send_message_stream(prompt, config=cfg):
+    async for chunk in await chat.send_message_stream(prompt, config=cfg):
         if chunk.candidates:
             cand = chunk.candidates[0]
             if cand.content and cand.content.parts:
@@ -90,7 +90,7 @@ def extract_python(md: str) -> str:
 
 class Session(dict):
     phase: str  # await_task | coding_loop | review_loop | finished
-    chat: Chat | None
+    chat: AsyncChat | None
     last_video: Path | None
 
     def __init__(self):
@@ -112,9 +112,9 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
     if state.phase == "await_task":
         if not state.chat:
             # First time - create chat and generate scenario
-            state.chat = client.chats.create(model=MODEL)
+            state.chat = client.aio.chats.create(model=MODEL)
             scenario_prompt = f"{SYSTEM_PROMPT_SCENARIO_GENERATOR}\n\n{user_msg}"
-            for txt in stream_parts(state.chat, scenario_prompt):
+            async for txt in stream_parts(state.chat, scenario_prompt):
                 append_bot_chunk(history, txt.text)
                 yield history, state, state.last_video
                 await asyncio.sleep(0)
@@ -128,7 +128,7 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
             state.phase = "coding_loop"
         else:
             # User wants to discuss/modify scenario
-            for chunk in stream_parts(state.chat, user_msg):
+            async for chunk in stream_parts(state.chat, user_msg):
                 append_bot_chunk(history, chunk.text)
                 yield history, state, state.last_video
                 await asyncio.sleep(0)
@@ -142,12 +142,11 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
 
     # ── Coding loop ──────────────────────────────────────────────────────────────
     if state.phase == "coding_loop":
-
-        prompt = "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
+        prompt = "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
 
         while True:  # keep cycling until render succeeds
             # 1. Ask for code
-            for chunk in stream_parts(state.chat, prompt):
+            async for chunk in stream_parts(state.chat, prompt):
                 append_bot_chunk(history, chunk.text)
                 yield history, state, state.last_video
                 await asyncio.sleep(0)
@@ -198,7 +197,7 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
             # 4. Review
             review_prompt = [file_ref, REVIEW_PROMPT]
             add_user_msg(history, "# system → review video")
-            for chunk in stream_parts(state.chat, review_prompt):
+            async for chunk in stream_parts(state.chat, review_prompt):
                 append_bot_chunk(history, chunk.text)
                 yield history, state, state.last_video
                 await asyncio.sleep(0)