Commit 481f639
Parent(s): 1c66134

Fix scenario feedback logic: allow users to modify scenario before code generation

demo.py CHANGED
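In outline, the change keeps the user in the `await_task` phase until they explicitly type "continue", and only then hands off to `coding_loop`. A minimal sketch of that flow for orientation; the phase names come from the diff below, while the helper function itself is hypothetical and not part of the commit:

```python
# Hypothetical summary of the phase flow in demo.py; the phase names are
# taken from the diff, the helper itself is not part of the commit.
PHASES = ("await_task", "coding_loop", "review_loop", "finished")

def next_phase(phase: str, user_msg: str) -> str:
    """Advance the session phase the way chat_handler is meant to."""
    wants_continue = user_msg.strip().lower() in {"c", "continue", "с"}
    if phase == "await_task" and wants_continue:
        return "coding_loop"   # the user approved the scenario
    return phase               # otherwise keep editing in the same phase

assert next_phase("await_task", "Continue") == "coding_loop"
assert next_phase("await_task", "make it shorter") == "await_task"
```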
@@ -8,8 +8,8 @@
   • **Render errors** are published *as a user message* and immediately
     sent to Gemini; the model replies and we try to generate the code again:
     a fully automatic loop, as in your CLI script.
-  • State management is reduced to clear phases: `await_task`, `
-    `
+  • State management is reduced to clear phases: `await_task`, `coding_loop`,
+    `review_loop`, `finished`.
 
 Run:
 ```bash
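The automatic repair loop the docstring describes can be sketched as a standalone function; `render` and `ask_model` here are hypothetical stand-ins for the renderer and the Gemini chat round-trip used in demo.py:

```python
# Standalone sketch of the docstring's auto-repair loop, assuming a
# render(code) that raises RuntimeError with the renderer's output and an
# ask_model(msg) that returns the model's next candidate code.
def render_with_retries(code: str, render, ask_model, max_tries: int = 3) -> str:
    for attempt in range(1, max_tries + 1):
        try:
            return render(code)                 # success ends the loop
        except RuntimeError as err:
            # The render error goes back to the model as if the user had
            # typed it, and the model's reply becomes the next candidate.
            code = ask_model(f"Render failed (attempt {attempt}): {err}")
    raise RuntimeError("render still failing after retries")
```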
@@ -89,7 +89,7 @@ def extract_python(md: str) -> str:
 # ────────────────────────── Session state ────────────────────────────────────
 
 class Session(dict):
-    phase: str  # await_task |
+    phase: str  # await_task | coding_loop | review_loop | finished
     chat: Chat | None
     last_video: Path | None
 
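A `dict` subclass with class-level annotations like this usually also routes attribute access into the mapping, so handlers can write `state.phase = ...` while the state stays a plain serialisable dict. A minimal sketch of that pattern; the dunder methods are an assumption, the diff only shows the annotations:

```python
# Sketch of a dict-backed session (the __getattr__/__setattr__ plumbing
# is an assumption; demo.py's diff only shows the annotations).
class Session(dict):
    phase: str   # await_task | coding_loop | review_loop | finished

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(name) from exc

    def __setattr__(self, name, value):
        self[name] = value

state = Session(phase="await_task", chat=None, last_video=None)
state.phase = "coding_loop"            # attribute write lands in the dict
assert state["phase"] == "coding_loop"
```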
@@ -110,18 +110,33 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
 
     # bootstrap chat on very first user request
     if state.phase == "await_task":
-        state.chat
-
-
-
-
+        if not state.chat:
+            # First time - create chat and generate scenario
+            state.chat = client.chats.create(model=MODEL)
+            scenario_prompt = f"{SYSTEM_PROMPT_SCENARIO_GENERATOR}\n\n{user_msg}"
+            for txt in stream_parts(state.chat, scenario_prompt):
+                append_bot_chunk(history, txt.text)
+                yield history, state, state.last_video
+                await asyncio.sleep(0)
+            append_bot_chunk(history, "\n\n*(type **continue** to proceed to code generation)*")
             yield history, state, state.last_video
-
-
-
-
-
-
+            return
+        else:
+            # Chat exists - check if user wants to proceed or modify scenario
+            if user_msg.strip().lower() in {"c", "continue", "с"}:
+                # User is ready to proceed to code generation
+                state.phase = "coding_loop"
+                prompt = "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
+                # Continue to coding_loop logic below
+            else:
+                # User wants to discuss/modify scenario
+                for chunk in stream_parts(state.chat, user_msg):
+                    append_bot_chunk(history, chunk.text)
+                    yield history, state, state.last_video
+                    await asyncio.sleep(0)
+                append_bot_chunk(history, "\n\n*(type **continue** when ready to proceed to code generation)*")
+                yield history, state, state.last_video
+                return
 
     # later phases require chat obj
     if not state.chat:
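The `yield ...` followed by `await asyncio.sleep(0)` in the added code is the usual streaming pattern for async UI handlers: each yield hands a partial transcript to the frontend, and the zero-length sleep gives the event loop a chance to actually deliver it. A self-contained sketch of the same pattern:

```python
import asyncio

# Standalone sketch of the streaming pattern: yield a growing transcript,
# then sleep(0) so the event loop can flush each partial update.
async def stream_demo(chunks):
    history = ""
    for chunk in chunks:
        history += chunk
        yield history              # partial state for the frontend
        await asyncio.sleep(0)     # let other tasks / the UI run

async def main():
    async for partial in stream_demo(["Scenario: ", "a cube ", "rotates."]):
        print(partial)

asyncio.run(main())
```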
@@ -129,39 +144,16 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
         yield history, state, state.last_video
         return
 
-    # ──
-    if state.phase == "
-        if user_msg.strip().lower() in {"c", "continue", "с"}:
-            #
-            state.phase = "coding_loop"
-            append_bot_chunk(history, "✅ Great! Proceeding to code generation...")
-            yield history, state, state.last_video
-
-            # Start code generation
+    # ── Coding loop ─────────────────────────────────────────────────────────────
+    if state.phase == "coding_loop":
+        if not user_msg.strip().lower() in {"c", "continue", "с"}:
+            # This should not happen anymore since we handle it in await_task
             prompt = "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
-            for chunk in stream_parts(state.chat, prompt):
-                append_bot_chunk(history, chunk.text)
-                yield history, state, state.last_video
-                await asyncio.sleep(0)
-            return
         else:
-
-            edit_prompt = f"Please modify the scenario based on this feedback: {user_msg}"
-            for txt in stream_parts(state.chat, edit_prompt):
-                append_bot_chunk(history, txt.text)
-                yield history, state, state.last_video
-                await asyncio.sleep(0)
-
-            append_bot_chunk(history, "\n\n*(type **continue** to proceed with this updated scenario, or provide more edits)*")
-            yield history, state, state.last_video
-            return
+            prompt = "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
 
-    # ── Coding loop ─────────────────────────────────────────────────────────────
-    if state.phase == "coding_loop":
-        # This phase now only handles coding errors and regeneration
-        prompt = user_msg  # User message should be error feedback
         while True:  # keep cycling until render succeeds
-        # 1. Ask for code
+            # 1. Ask for code
             for chunk in stream_parts(state.chat, prompt):
                 append_bot_chunk(history, chunk.text)
                 yield history, state, state.last_video
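Note that the accepted continue tokens mix scripts: the set `{"c", "continue", "с"}` in the diff contains both Latin `c` and Cyrillic `с` (U+0441), so the check also works from a Russian keyboard layout. A tiny hypothetical helper makes that explicit:

```python
# Hypothetical helper around the token set used in demo.py; the last
# entry is Cyrillic "с" (U+0441), not Latin "c".
CONTINUE_TOKENS = {"c", "continue", "с"}

def wants_continue(user_msg: str) -> bool:
    return user_msg.strip().lower() in CONTINUE_TOKENS

assert wants_continue("  Continue ")
assert wants_continue("с")   # Cyrillic es from a Russian keyboard layout
```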