Update app.py
app.py CHANGED
@@ -2,13 +2,12 @@ import os
 import gradio as gr
 import google.generativeai as genai
 import asyncio
-import json
 
 ###############################################################################
 # 1. Environment setup and model initialization
 ###############################################################################
 
-#
+# Paste your key here or read it from the environment
 GEMINI_API_KEY = "AIzaSyBoqoPX-9uzvXyxzse0gRwH8_P9xO6O3Bc"
 if not GEMINI_API_KEY:
     print("Error: GEMINI_API_KEY is not set.")
@@ -16,14 +15,13 @@ if not GEMINI_API_KEY:
 
 genai.configure(api_key=GEMINI_API_KEY)
 
-#
+# Pick the available models (example)
 AVAILABLE_MODELS = [
     "gemini-2.0-flash-exp",
     "gemini-exp-1206",
     "gemini-2.0-flash-thinking-exp-1219",
 ]
 
-# Initialization
 MODELS = {}
 for model_name in AVAILABLE_MODELS:
     try:
@@ -31,100 +29,84 @@ for model_name in AVAILABLE_MODELS:
     except Exception as e:
         print(f"[Warning] Failed to initialize model {model_name}: {e}")
 
-
 ###############################################################################
-# 2. …
+# 2. Default prompts (developer role) for each model
 ###############################################################################
+# When the user switches to a model, we append this message to the history.
 
-DEFAULT_SYSTEM_PROMPTS = {
+DEFAULT_DEVELOPER_PROMPTS = {
     "gemini-2.0-flash-exp": (
-        "You are a normal model. "
-        "…
+        "You are a normal model (developer role). "
+        "Provide direct answers with no JSON wrapping."
     ),
     "gemini-exp-1206": (
-        "You are …
-        "…
+        "You are an experimental normal model (developer role). "
+        "Provide direct answers with no JSON wrapping."
     ),
     "gemini-2.0-flash-thinking-exp-1219": (
-        "You are a thinking model. "
-        "…
-        "You may …
+        "You are a thinking model (developer role). "
+        "Please provide your final answer in the format {output: ...}. "
+        "You may use internal thoughts but do not show them directly to the user."
     ),
 }
 
-
 ###############################################################################
-# 3. …
+# 3. Helper that picks the assistant role (assistant vs model)
 ###############################################################################
 
 def _assistant_role(model_name: str) -> str:
     """
-    …
-    … while the others use role="assistant".
-    Tune this logic to the needs of your models.
+    Some newer models do not accept 'assistant' and require 'model' instead.
     """
-    …
+    # Assume "gemini-exp-1206" and "gemini-2.0-flash-thinking-exp-1219" want "model"
+    if model_name in ["gemini-exp-1206", "gemini-2.0-flash-thinking-exp-1219"]:
         return "model"
     return "assistant"
 
-
 ###############################################################################
-# 4. …
+# 4. Converting the Gradio history to the Generative AI format
 ###############################################################################
 
-def …
+def _history_to_genai(history, model_name):
     """
-    Gradio stores …
-    …
-    …
+    Gradio stores [(user_msg, bot_msg), ...].
+    Google GenAI expects [{"role": "...", "parts": "..."}].
+    We also account for the developer messages that we insert.
     """
     genai_history = []
+    asst_role = _assistant_role(model_name)
 
-
-    # 1) Add the "system" message if it is not there yet
-    # (say, it goes first in the history, with the special role="system").
-    # Some models may not support role="system".
-    # If so, it has to be stored as user.
-    system_prompt = DEFAULT_SYSTEM_PROMPTS.get(model_name, "")
-    # Check whether we already added the system prompt (for simplicity, look for role="system")
-    has_system = any(h.get("role") == "system" for h in genai_history)
-
-    # If it is not yet in genai_history, add it
-    if system_prompt and not has_system:
-        # Insert it as the first message
-        genai_history.append({"role": "system", "parts": system_prompt})
-
-    # 2) Then convert the main history
     for user_text, bot_text in history:
         if user_text:
+            # A user message
             genai_history.append({"role": "user", "parts": user_text})
         if bot_text:
-            …
-            …
+            # The assistant's (or model's) reply
+            genai_history.append({"role": asst_role, "parts": bot_text})
     return genai_history
 
-
 ###############################################################################
-# 5. …
+# 5. Streaming generators for normal models and "thinking" models
 ###############################################################################
 
 async def _respond_stream(model_name, user_message, history):
     """
-    …
+    A streaming reply for normal models:
+    - piece by piece (partial_text).
     """
     if model_name not in MODELS:
         yield "Error: model not found."
         return
 
     model = MODELS[model_name]
-    genai_history = …
+    genai_history = _history_to_genai(history, model_name)
 
     try:
         chat = model.start_chat(history=genai_history)
-        …
+        stream = chat.send_message(user_message, stream=True)
 
         partial_text = ""
-        for chunk in …
+        for chunk in stream:
             partial_text += (chunk.text or "")
             yield partial_text
 
@@ -136,19 +118,18 @@ async def _respond_stream(model_name, user_message, history):
 
 async def _respond_thinking(model_name, user_message, history):
     """
-    …
-    …
-    …
-    - Extract the 'thoughts' (if p.thought == True) into thinking_output.
+    For thinking models:
+    1) First show "Thinking..."
+    2) When done — the final answer in the {output: ...} format, plus the thoughts.
     """
     if model_name not in MODELS:
         yield "Error: model not found.", ""
         return
 
     model = MODELS[model_name]
-    genai_history = …
+    genai_history = _history_to_genai(history, model_name)
 
-    # …
+    # Start with "Thinking..."
     yield "Thinking...", ""
 
     try:
@@ -161,55 +142,51 @@ async def _respond_thinking(model_name, user_message, history):
         if response.candidates:
             parts = response.candidates[0].content.parts
             for p in parts:
-                # If these are "thoughts"
                 if getattr(p, "thought", False):
                     thinking_process_text += p.text or ""
                 else:
                     final_text += p.text or ""
 
-        # …
-        # …
-        # But we may need to "play it safe" manually.
-        # Depending on the task:
-        # final_text = json.dumps({"output": final_text}, ensure_ascii=False)
+        # For thinking models we asked for the final answer as {output: ...}
+        final_text_formatted = f"{{output: {final_text}}}"
 
-        yield …
+        yield final_text_formatted, thinking_process_text
         return
 
     except Exception as e:
         yield f"API request error: {e}", ""
         return
 
-
 ###############################################################################
-# 6. The main …
+# 6. The main handler for user input
 ###############################################################################
 
-async def user_send_message(user_message, history, model_name, thinking_text):
+async def user_send_message(
+    user_message: str,
+    history: list[tuple[str, str]],
+    model_name: str,
+    thinking_text: str
+):
     """
-    …
-    …
-    user_message : the user's new message
-    history      : [(user, assistant), ...]
-    model_name   : the selected model
-    thinking_text: the current "thoughts" (in a separate Textbox)
+    The callback for when the user sends a request.
+    Append a new (user_msg, None) to history, then generate the reply.
     """
-    # …
+    # Empty input
     if not user_message.strip():
-        # Just do nothing
         yield history, thinking_text
         return
 
     # Append (user_message, None)
     history.append((user_message, None))
 
-    # If the model is thinking
+    # If the model is a thinking one
     if "thinking" in model_name.lower():
         async for (assistant_text, thought_text) in _respond_thinking(model_name, user_message, history):
             history[-1] = (user_message, assistant_text)
             yield history, thought_text
         return
     else:
+        # A normal model
        partial_answer = ""
        async for chunk in _respond_stream(model_name, user_message, history):
            partial_answer = chunk
@@ -217,37 +194,179 @@ async def user_send_message(user_message, history, model_name, thinking_text):
         yield history, ""
         return
 
+###############################################################################
+# 7. Clearing the dialog
+###############################################################################
+
+def clear_all():
+    """Reset the history and the thoughts."""
+    return [], ""
 
 ###############################################################################
-# …
+# 8. When the model is changed in the Dropdown
 ###############################################################################
 
-def …
+def on_model_change(selected_model, history, thinking_text):
     """
-    …
-    …
-    so that the model knows about the switch.
+    When switching models, append a developer message to the history,
+    plus that model's default prompt (also as developer).
     """
-    history.…
-    …
-    )
-    …
+    new_history = history.copy()
+
+    # Tell the model that we switched (developer role)
+    new_history.append((
+        # a developer message looks like "role=developer" → user_text
+        # => for convenience we treat it as (user_text, None) => later, in _history_to_genai,
+        # it becomes "role=developer" rather than user.
+        # But wouldn't a separate field be simpler?
+        # To keep it simple — make it a pseudo-user with a developer tag.
+        # Or use a workaround: user_msg="[developer]Switched to X" →
+        # and in _history_to_genai, if the string starts with "[developer]",
+        # set role="developer"?
+        #
+        # More tidily — use a 3-element tuple?
+        # But the Gradio Chatbot expects (str, str).
+        #
+        # Simplest is to do it while converting in _history_to_genai:
+        # if user_text.startswith("<developer>"),
+        # set role="developer".
+        #
+        # Per the task: "replace the system role with developer".
+        # OK, we will recognize the "<developer>: " prefix.
+        #
+        "<developer>: Switched to model: " + selected_model,
+        None
+    ))
+
+    # Append the default prompt (developer role)
+    default_prompt = DEFAULT_DEVELOPER_PROMPTS.get(
+        selected_model,
+        "No default prompt for this model."
+    )
+    new_history.append((
+        "<developer>: " + default_prompt,
+        None
+    ))
 
+    return new_history, thinking_text
 
 ###############################################################################
-# …
+# 9. History conversion that is aware of the developer role
 ###############################################################################
 
-def …
-    …
+def _history_to_genai_enhanced(history, model_name):
+    """
+    An improved version that distinguishes developer messages
+    (the "<developer>: " prefix) from user messages.
+    """
+    asst_role = _assistant_role(model_name)
+    genai_history = []
+
+    for user_text, bot_text in history:
+        if user_text:
+            if user_text.startswith("<developer>: "):
+                # Treat this as the developer role
+                dev_content = user_text.replace("<developer>: ", "", 1)
+                genai_history.append({"role": "developer", "parts": dev_content})
+            else:
+                # A regular user message
+                genai_history.append({"role": "user", "parts": user_text})
+
+        if bot_text:
+            # The assistant / model reply
+            genai_history.append({"role": asst_role, "parts": bot_text})
+
+    return genai_history
+
+# Update our generators to use _history_to_genai_enhanced:
+async def _respond_stream_enh(model_name, user_message, history):
+    if model_name not in MODELS:
+        yield "Error: model not found."
+        return
+
+    model = MODELS[model_name]
+    genai_history = _history_to_genai_enhanced(history, model_name)
+
+    try:
+        chat = model.start_chat(history=genai_history)
+        stream = chat.send_message(user_message, stream=True)
+
+        partial_text = ""
+        for chunk in stream:
+            partial_text += (chunk.text or "")
+            yield partial_text
+
+        return
+    except Exception as e:
+        yield f"API request error: {e}"
+        return
+
+async def _respond_thinking_enh(model_name, user_message, history):
+    if model_name not in MODELS:
+        yield "Error: model not found.", ""
+        return
+
+    model = MODELS[model_name]
+    genai_history = _history_to_genai_enhanced(history, model_name)
+
+    # "Thinking..."
+    yield "Thinking...", ""
+
+    try:
+        chat = model.start_chat(history=genai_history)
+        response = chat.send_message(user_message, stream=False)
+
+        thinking_process_text = ""
+        final_text = ""
+
+        if response.candidates:
+            parts = response.candidates[0].content.parts
+            for p in parts:
+                if getattr(p, "thought", False):
+                    thinking_process_text += p.text or ""
+                else:
+                    final_text += p.text or ""
 
+        # The JSON-style wrapper
+        final_text_formatted = f"{{output: {final_text}}}"
+
+        yield final_text_formatted, thinking_process_text
+        return
+    except Exception as e:
+        yield f"API request error: {e}", ""
+        return
+
+async def user_send_message_enh(
+    user_message: str,
+    history: list[tuple[str, str]],
+    model_name: str,
+    thinking_text: str
+):
+    if not user_message.strip():
+        yield history, thinking_text
+        return
+
+    history.append((user_message, None))
+
+    if "thinking" in model_name.lower():
+        async for (assistant_text, thought_text) in _respond_thinking_enh(model_name, user_message, history):
+            history[-1] = (user_message, assistant_text)
+            yield history, thought_text
+        return
+    else:
+        partial_answer = ""
+        async for chunk in _respond_stream_enh(model_name, user_message, history):
+            partial_answer = chunk
+            history[-1] = (user_message, partial_answer)
+            yield history, ""
+        return
 
 ###############################################################################
-# …
+# 10. Building the Gradio interface
 ###############################################################################
 
 with gr.Blocks() as demo:
-    gr.Markdown("## Chat with Gemini …
+    gr.Markdown("## Chat with Gemini. Developer-role support, model switching, JSON answers for thinking models")
 
     with gr.Row():
         model_dropdown = gr.Dropdown(
@@ -257,43 +376,57 @@ with gr.Blocks() as demo:
         )
         clear_button = gr.Button("Clear chat")
 
-    # State that stores the history
     history_state = gr.State([])
-    # State for the "thoughts"
     thinking_store = gr.State("")
 
     chatbot = gr.Chatbot(label="Dialog with Gemini")
     user_input = gr.Textbox(label="Your question", placeholder="Type your text...")
-    …
-    …
-    …
-    )
+    thinking_output = gr.Textbox(label="Thoughts", interactive=False)
+    send_btn = gr.Button("Send")
 
-    …
-    …
-    …
+    ################################################
+    # (A) Handling the model switch
+    ################################################
+    def handle_model_change(selected_model, history, thinking):
+        new_history, new_thinking = on_model_change(selected_model, history, thinking)
+        return new_history, new_thinking
+
+    # When the user changes the model:
+    model_change = model_dropdown.change(
+        fn=handle_model_change,
+        inputs=[model_dropdown, history_state, thinking_store],
+        outputs=[history_state, thinking_store],
+        queue=False
+    ).then(
+        # After the developer messages are added to the history → refresh the chat
+        fn=lambda h: h,
+        inputs=[history_state],
+        outputs=[chatbot],
+        queue=False
+    )
+
+    ################################################
+    # (B) On pressing "Send"
+    ################################################
     send_chain = send_btn.click(
-        fn=…
+        fn=user_send_message_enh,
         inputs=[user_input, history_state, model_dropdown, thinking_store],
         outputs=[history_state, thinking_store],
         queue=True
     )
-    # Refresh the chat
     send_chain.then(
         fn=lambda h: h,
         inputs=[history_state],
         outputs=[chatbot],
         queue=True
     )
-    # Refresh the "Thoughts" field
     send_chain.then(
         fn=lambda t: t,
         inputs=[thinking_store],
         outputs=[thinking_output],
         queue=True
     )
-    # …
+    # Clear the input field
     send_chain.then(
         fn=lambda: "",
         inputs=[],
@@ -301,9 +434,11 @@ with gr.Blocks() as demo:
         queue=False
     )
 
-    # …
+    ################################################
+    # (C) On pressing Enter in the textbox
+    ################################################
     submit_chain = user_input.submit(
-        fn=…
+        fn=user_send_message_enh,
         inputs=[user_input, history_state, model_dropdown, thinking_store],
         outputs=[history_state, thinking_store],
         queue=True
@@ -320,6 +455,7 @@ with gr.Blocks() as demo:
         outputs=[thinking_output],
         queue=True
     )
+    # Clear the input field
     submit_chain.then(
         fn=lambda: "",
         inputs=[],
@@ -327,19 +463,9 @@ with gr.Blocks() as demo:
         queue=False
     )
 
-    …
-    …
-    …
-        inputs=[model_dropdown, history_state],
-        outputs=[history_state],
-        queue=False
-    ).then(
-        fn=lambda h: h,  # refresh the Chatbot
-        inputs=[history_state],
-        outputs=[chatbot]
-    )
-
-    # 4) The "Clear chat" button
+    ################################################
+    # (D) The "Clear" button
+    ################################################
     clear_chain = clear_button.click(
         fn=clear_all,
         inputs=[],
@@ -355,11 +481,13 @@ with gr.Blocks() as demo:
         fn=lambda _: "",
         inputs=[],
         outputs=[thinking_output]
-    ).then(
+    )
+    clear_chain.then(
         fn=lambda: "",
         inputs=[],
         outputs=[user_input]
     )
 
+# Launch
 if __name__ == "__main__":
     demo.launch()
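For reference, here is a standalone sketch (not part of the commit) that mirrors the `_history_to_genai_enhanced` logic above and shows what a short session converts into. Note that the "developer" role string is this app's own convention; whether `model.start_chat` accepts anything beyond "user" and "model" is an assumption to verify against the google-generativeai SDK.

# Standalone sketch mirroring _history_to_genai_enhanced from the diff above.
def to_genai(history, asst_role="model"):
    out = []
    for user_text, bot_text in history:
        if user_text:
            if user_text.startswith("<developer>: "):
                # Developer-tagged pseudo-user message
                out.append({"role": "developer",
                            "parts": user_text.replace("<developer>: ", "", 1)})
            else:
                out.append({"role": "user", "parts": user_text})
        if bot_text:
            out.append({"role": asst_role, "parts": bot_text})
    return out

sample = [
    ("<developer>: Switched to model: gemini-exp-1206", None),
    ("Hello!", "Hi, how can I help?"),
]
print(to_genai(sample))
# [{'role': 'developer', 'parts': 'Switched to model: gemini-exp-1206'},
#  {'role': 'user', 'parts': 'Hello!'},
#  {'role': 'model', 'parts': 'Hi, how can I help?'}]  (shown wrapped)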
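The removed code kept a commented-out `json.dumps` fallback, while the new `_respond_thinking*` functions build the final answer with `f"{{output: {final_text}}}"`, which is not valid JSON once the answer itself contains quotes or braces. A minimal sketch of the JSON-safe variant the old comment hinted at (the `wrap_output` helper name is ours, not the commit's):

import json

# Hypothetical helper based on the json.dumps fallback commented out in the
# removed code; produces real JSON instead of the f-string pseudo-format.
def wrap_output(final_text: str) -> str:
    return json.dumps({"output": final_text}, ensure_ascii=False)

print(wrap_output('He said "hi" {ok}'))
# {"output": "He said \"hi\" {ok}"}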
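The thought/answer split in `_respond_thinking*` relies on a per-part `thought` flag. The mock below (no API call; `SimpleNamespace` stands in for the SDK's part objects, which is an assumption about their shape) exercises the same branching:

from types import SimpleNamespace

# Mock parts standing in for response.candidates[0].content.parts.
parts = [
    SimpleNamespace(text="Let me check the dates first. ", thought=True),
    SimpleNamespace(text="The answer is 42.", thought=False),
]

thinking_process_text, final_text = "", ""
for p in parts:
    if getattr(p, "thought", False):
        thinking_process_text += p.text or ""
    else:
        final_text += p.text or ""

print(thinking_process_text)  # Let me check the dates first.
print(final_text)             # The answer is 42.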
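Finally, the commit hardcodes GEMINI_API_KEY (the key string above should be treated as exposed), so the `if not GEMINI_API_KEY` guard can never fire. A sketch of the environment-based variant that the new comment alludes to; app.py already imports `os`:

import os

# Read the key from the environment so the guard below can actually fire.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
if not GEMINI_API_KEY:
    raise SystemExit("Error: GEMINI_API_KEY is not set.")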
|