freQuensy23 committed on
Commit 1c66134 · 1 Parent(s): e5492fa

Add scenario editing capability - allow users to modify scenario before code generation

Files changed (1)
  1. demo.py +35 -13
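This commit wedges a new `scenario_edit` phase between scenario generation and code generation. A minimal sketch of the resulting phase flow (not part of the diff; the phase names come from `demo.py`, while the transition table itself and the edges after `coding_loop` are assumptions based on the docstring):

```python
# Hypothetical transition table for the chat phases; names are from demo.py,
# but the edges after coding_loop are inferred from the docstring, not the diff.
PHASE_TRANSITIONS: dict[str, set[str]] = {
    "await_task": {"scenario_edit"},                    # scenario written, ask for approval
    "scenario_edit": {"scenario_edit", "coding_loop"},  # keep editing until "continue"
    "coding_loop": {"review_loop"},                     # render retries stay inside this phase
    "review_loop": {"finished"},
    "finished": set(),
}

def can_move(current: str, nxt: str) -> bool:
    """True if the chat is allowed to move from `current` to `nxt`."""
    return nxt in PHASE_TRANSITIONS.get(current, set())
```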
demo.py CHANGED
@@ -8,8 +8,8 @@
  • **Render errors** are published *as a user message* and immediately
    sent to Gemini; the model replies, and we try to generate the code again:
    a fully automatic cycle, just like in your CLI script.
- • State management is reduced to clear phases: `await_task`, `coding_loop`,
-   `review_loop`, `finished`.
+ • State management is reduced to clear phases: `await_task`, `scenario_edit`,
+   `coding_loop`, `review_loop`, `finished`.
 
  Launch:
  ```bash
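The first bullet describes the automatic render-retry cycle. A self-contained sketch of that pattern, with the renderer, the model call, and the code extractor passed in as callables (all three names here are placeholders; `demo.py` does define an `extract_python(md: str) -> str`, visible in the next hunk header):

```python
from pathlib import Path
from typing import Callable, Optional

def render_with_retries(
    ask_model: Callable[[str], str],       # sends one user message, returns the reply text
    render: Callable[[str], Path],         # renders the code, raises RuntimeError with the log
    extract_python: Callable[[str], str],  # e.g. demo.py's extract_python(md)
    code: str,
    max_attempts: int = 3,
) -> Optional[Path]:
    """On a render error, post the error back as a user message and retry."""
    for _ in range(max_attempts):
        try:
            return render(code)
        except RuntimeError as err:
            # Publish the failure "as a user message" and regenerate the code.
            reply = ask_model(f"Render failed:\n{err}\nPlease fix the code.")
            code = extract_python(reply)
    return None
```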
@@ -89,7 +89,7 @@ def extract_python(md: str) -> str:
 # ────────────────────────── Session state ────────────────────────────────────
 
 class Session(dict):
-    phase: str  # await_task | coding_loop | review_loop | finished
+    phase: str  # await_task | scenario_edit | coding_loop | review_loop | finished
     chat: Chat | None
     last_video: Path | None
 
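`Session` subclasses `dict`, yet the handler reads it through attributes (`state.phase`, `state.last_video`); the annotations in the hunk only declare the expected keys. One common way to bridge attribute and key access is sketched below (the dunder methods are an assumption, since the class body is outside this hunk):

```python
from pathlib import Path

class Session(dict):
    """dict-backed state whose keys double as attributes (sketch, assumed wiring)."""
    phase: str               # await_task | scenario_edit | coding_loop | review_loop | finished
    chat: object | None      # live Gemini chat object, once created
    last_video: Path | None  # most recently rendered clip, if any

    def __getattr__(self, name):           # state.phase  ->  state["phase"]
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(name) from exc

    def __setattr__(self, name, value):    # state.phase = x  ->  state["phase"] = x
        self[name] = value

# A fresh session would start in the first phase:
state = Session(phase="await_task", chat=None, last_video=None)
assert state.phase == "await_task"
```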
@@ -118,8 +118,8 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
         yield history, state, state.last_video
         await asyncio.sleep(0)
 
-    append_bot_chunk(history, "\n\n*(type **continue** to proceed to code generation)*")
-    state.phase = "coding_loop"
+    append_bot_chunk(history, "\n\n*(type **continue** to proceed with this scenario, or provide edits/suggestions to modify it)*")
+    state.phase = "scenario_edit"
     yield history, state, state.last_video
     return
 
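Only the prompt text and the target phase change in this hunk. `append_bot_chunk` is called throughout with `(history, text)`; its body is not shown in the diff, but with Gradio-style `(user, bot)` history tuples it presumably does something like this (an assumption, shown for orientation only):

```python
from typing import List, Tuple

def append_bot_chunk(history: List[Tuple[str, str]], chunk: str) -> None:
    """Append a streamed chunk to the bot half of the latest (user, bot) pair."""
    user, bot = history[-1]
    history[-1] = (user, (bot or "") + chunk)
```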
 
@@ -129,17 +129,39 @@ async def chat_handler(user_msg: str, history: List[Tuple[str, str]], state: Ses
         yield history, state, state.last_video
         return
 
-    # ── Coding loop ─────────────────────────────────────────────────────────────
-    if state.phase == "coding_loop":
-        if user_msg.strip().lower() not in {"c", "continue", "с"}:
-            append_bot_chunk(history, "⚠️ Type **continue** to move on.")
+    # ── Scenario editing phase ─────────────────────────────────────────────────
+    if state.phase == "scenario_edit":
+        if user_msg.strip().lower() in {"c", "continue", "с"}:
+            # User is happy with the scenario, proceed to code generation
+            state.phase = "coding_loop"
+            append_bot_chunk(history, "✅ Great! Proceeding to code generation...")
             yield history, state, state.last_video
+
+            # Start code generation
+            prompt = "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
+            for chunk in stream_parts(state.chat, prompt):
+                append_bot_chunk(history, chunk.text)
+                yield history, state, state.last_video
+                await asyncio.sleep(0)
             return
-        prompt = (
-            "Thanks. It is good scenario. Now generate code for it.\n\n" + SYSTEM_PROMPT_CODEGEN
-        )
+        else:
+            # User wants to modify the scenario
+            edit_prompt = f"Please modify the scenario based on this feedback: {user_msg}"
+            for txt in stream_parts(state.chat, edit_prompt):
+                append_bot_chunk(history, txt.text)
+                yield history, state, state.last_video
+                await asyncio.sleep(0)
+
+            append_bot_chunk(history, "\n\n*(type **continue** to proceed with this updated scenario, or provide more edits)*")
+            yield history, state, state.last_video
+            return
+
+    # ── Coding loop ─────────────────────────────────────────────────────────────
+    if state.phase == "coding_loop":
+        # This phase now only handles coding errors and regeneration
+        prompt = user_msg  # user message should be error feedback
         while True:  # keep cycling until render succeeds
-            # 1. Ask for code
+            # 1. Ask for code (or regenerate based on feedback)
             for chunk in stream_parts(state.chat, prompt):
                 append_bot_chunk(history, chunk.text)
                 yield history, state, state.last_video
 
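Taken together, a session now passes through `scenario_edit` before any code is generated. A hypothetical driver for the new flow (`chat_handler` and `Session` are the objects from `demo.py`; the messages are invented):

```python
import asyncio

async def walkthrough():
    # Assumes demo.py's Session and chat_handler are importable in scope.
    state = Session(phase="await_task", chat=None, last_video=None)
    history: list[tuple[str, str]] = []

    # 1. Task in -> scenario streamed out, phase becomes "scenario_edit".
    async for history, state, video in chat_handler("Animate a bouncing ball", history, state):
        pass

    # 2. Any non-"continue" message is treated as scenario feedback and loops here.
    async for history, state, video in chat_handler("Make the ball red", history, state):
        pass

    # 3. "continue" (or "c", or Cyrillic "с") approves the scenario and starts codegen.
    async for history, state, video in chat_handler("continue", history, state):
        pass

asyncio.run(walkthrough())
```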