Update app.py

app.py CHANGED
@@ -4,100 +4,123 @@ import google.generativeai as genai
 import asyncio
 
 ###############################################################################
-# 1. Setting up the environment and
+# 1. Setting up the environment and models
 ###############################################################################
 
-
-GEMINI_API_KEY = "AIzaSyBoqoPX-9uzvXyxzse0gRwH8_P9xO6O3Bc"
+GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY", "ВАШ_КЛЮЧ_ЗДЕСЬ")
 if not GEMINI_API_KEY:
     print("Error: GEMINI_API_KEY is not set.")
     exit()
 
 genai.configure(api_key=GEMINI_API_KEY)
 
-# List
+# List of models
 AVAILABLE_MODELS = [
     "gemini-2.0-flash-exp",
     "gemini-exp-1206",
     "gemini-2.0-flash-thinking-exp-1219",
 ]
 
+# Initialize the models
 MODELS = {}
 for model_name in AVAILABLE_MODELS:
     try:
         MODELS[model_name] = genai.GenerativeModel(model_name=model_name)
     except Exception as e:
-        print(f"[
+        print(f"[!] Не удалось инициализировать {model_name}: {e}")
 
 ###############################################################################
-# 2.
+# 2. Default prompts (system message) for each model
 ###############################################################################
+MODEL_SYSTEM_PROMPTS = {
+    # Example: a regular model
+    "gemini-2.0-flash-exp": (
+        "You are a helpful assistant. "
+        "You respond in Markdown. "
+        "You do NOT wrap your final answer in {output: ...}."
+    ),
+    # Assume this is a regular model too
+    "gemini-exp-1206": (
+        "You are an experimental Gemini model. "
+        "You respond in Markdown. "
+        "You do NOT wrap your final answer in {output: ...}."
+    ),
+    # The thinking model
+    "gemini-2.0-flash-thinking-exp-1219": (
+        "You are a thinking model. "
+        "You respond with thoughts internally, but produce the **final** answer in JSON format, like `{output: (final text)}`."
+    ),
+}
 
+###############################################################################
+# 3. Decide which role to use: 'assistant' or 'model'
+###############################################################################
 def _assistant_role(model_name: str) -> str:
     """
-    For some models
-
+    Some Gemini models require role='model' rather than 'assistant'.
+    If you run into the error "Please use a valid role: user, model",
+    set 'model' here.
     """
-    #
-    #
-    #
-
+    # Adjust this condition for your own models:
+    # assume gemini-exp-1206 and gemini-2.0-flash-thinking-exp-1219
+    # require role='model' and the rest use 'assistant'
     if model_name in ["gemini-exp-1206", "gemini-2.0-flash-thinking-exp-1219"]:
         return "model"
-    # Everything else can stay "assistant"
     return "assistant"
 
 ###############################################################################
-#
+# 4. History conversion utilities
 ###############################################################################
-
 def _history_gradio_to_genai(history, model_name):
     """
-    Gradio
-
-
+    Gradio: [(user_msg, bot_msg), (user_msg, bot_msg), ...]
+    → Gemini: [{'role': 'user'|'assistant'|'model'|'system', 'parts': ...}, ...]
+
+    Prepend a system message (the default prompt).
+    Then every user turn gets {'role': 'user'}
+    and every assistant reply gets {'role': <assistant_role>}.
     """
     genai_history = []
+
+    # System message (the default prompt)
+    system_prompt = MODEL_SYSTEM_PROMPTS.get(model_name, "")
+    if system_prompt:
+        genai_history.append({"role": "system", "parts": system_prompt})
+
     asst_role = _assistant_role(model_name)
 
     for user_text, bot_text in history:
-        # Message
+        # User message
         if user_text:
             genai_history.append({"role": "user", "parts": user_text})
-        # Message
+        # "Assistant" message
         if bot_text:
             genai_history.append({"role": asst_role, "parts": bot_text})
 
     return genai_history
 
-
 ###############################################################################
-#
+# 5. Generator functions that produce the reply
 ###############################################################################
 
 async def _respond_stream(model_name, user_message, history):
     """
-
-    We yield the text in pieces.
+    A regular model with stream=True: the reply is yielded in chunks.
     """
     if model_name not in MODELS:
-        yield "
+        yield "Error: model not found."
         return
 
     model = MODELS[model_name]
-
-    # Convert the history into the required format
    genai_history = _history_gradio_to_genai(history, model_name)
 
     try:
         chat = model.start_chat(history=genai_history)
         response_stream = chat.send_message(user_message, stream=True)
-
         partial_text = ""
         for chunk in response_stream:
             partial_text += (chunk.text or "")
             yield partial_text
-
         return
     except Exception as e:
         yield f"Ошибка при запросе к API: {e}"
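A quick sanity check of the history conversion above; this is a standalone sketch that mirrors `_history_gradio_to_genai` without the SDK, and the `to_genai_history` helper plus the sample data are illustrative, not part of the commit:

def to_genai_history(history, system_prompt, asst_role="model"):
    # Mirror of the conversion: optional system entry first, then alternating turns.
    out = [{"role": "system", "parts": system_prompt}] if system_prompt else []
    for user_text, bot_text in history:
        if user_text:
            out.append({"role": "user", "parts": user_text})
        if bot_text:
            out.append({"role": asst_role, "parts": bot_text})
    return out

print(to_genai_history([("Hi", "Hello!"), ("What is 2+2?", None)], "You are helpful."))
# [{'role': 'system', 'parts': 'You are helpful.'},
#  {'role': 'user', 'parts': 'Hi'},
#  {'role': 'model', 'parts': 'Hello!'},
#  {'role': 'user', 'parts': 'What is 2+2?'}]

One caveat: recent google-generativeai releases validate history roles, and chat history may accept only 'user' and 'model'; if the 'system' (or 'assistant') entry is rejected, the usual alternative is to pass the prompt via genai.GenerativeModel(model_name, system_instruction=...).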
@@ -106,18 +129,20 @@ async def _respond_stream(model_name, user_message, history):
 
 async def _respond_thinking(model_name, user_message, history):
     """
-
+    Thinking model:
     1) "Думаю..."
-    2)
+    2) When it finishes:
+       - collect the "thoughts" (part.thought)
+       - wrap the final answer in {output: ...} (per the spec)
     """
     if model_name not in MODELS:
-        yield "
+        yield "Error: model not found.", ""
         return
 
     model = MODELS[model_name]
     genai_history = _history_gradio_to_genai(history, model_name)
 
-    #
+    # Step 1: "Думаю..."
     yield "Думаю...", ""
 
     try:
@@ -126,7 +151,6 @@ async def _respond_thinking(model_name, user_message, history):
 
         thinking_process_text = ""
         final_text = ""
-
         if response.candidates:
             parts = response.candidates[0].content.parts
             for p in parts:
@@ -135,44 +159,37 @@ async def _respond_thinking(model_name, user_message, history):
                 else:
                     final_text += p.text or ""
 
-
+        # Per the spec: wrap the final answer in {output: ...}
+        final_text_json = f"{{output: ({final_text})}}"
+
+        yield final_text_json, thinking_process_text
         return
     except Exception as e:
         yield f"Ошибка при запросе к API: {e}", ""
         return
 
-
 ###############################################################################
-#
+# 6. The main function for one dialogue step
 ###############################################################################
-
-async def user_send_message(
-    user_message: str,
-    history: list[tuple[str, str]],
-    model_name: str,
-    thinking_text: str
-):
+async def user_send_message(user_message, history, model_name, thinking_text):
     """
-
-
-    history : [(user, assistant), ...]
-    model_name : the currently selected model
-    thinking_text: the current "thinking" text
-
-    Yield (updated_history, new_thinking_text) step by step.
+    The user typed user_message; generate a reply based on history + model_name.
+    Yield (history, thinking_text) at every step.
     """
-    # If
+    # If the string is empty, do nothing
     if not user_message.strip():
         yield history, thinking_text
         return
 
-    # Append
+    # Add the turn to history; the assistant reply is None for now
     history.append((user_message, None))
 
-    #
+    # Check whether the model is a "thinking" one
     if "thinking" in model_name.lower():
         async for (assistant_text, thought_text) in _respond_thinking(model_name, user_message, history):
+            # Update the reply in the history
             history[-1] = (user_message, assistant_text)
+            # Update the thinking pane
             yield history, thought_text
         return
     else:
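The `{output: (...)}` wrapper built above is a formatted string, not strict JSON, so a caller that wants the bare text back has to unwrap it. A minimal sketch with a hypothetical `unwrap_output` helper (not part of the commit):

import re

def unwrap_output(wrapped: str) -> str:
    # Hypothetical helper: pull the text out of "{output: (...)}";
    # returns the input unchanged if the pattern does not match.
    m = re.fullmatch(r"\{output: \((.*)\)\}", wrapped, flags=re.DOTALL)
    return m.group(1) if m else wrapped

assert unwrap_output("{output: (2 + 2 = 4)}") == "2 + 2 = 4"
assert unwrap_output("plain text") == "plain text"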
@@ -184,68 +201,106 @@ async def user_send_message(
             yield history, ""
         return
 
-
 ###############################################################################
-#
+# 7. Model-switching logic, so the switch is "announced" in the chat
 ###############################################################################
+def switch_model(old_model, new_model, history):
+    """
+    When the model changes, append a user message to the history:
+    "I’m switching from {old_model} to {new_model}."
+    Store the new model name in state (prev_model_state).
+    """
+    if old_model and new_model and old_model != new_model:
+        switch_message = f"I’m switching from {old_model} to {new_model}."
+        history.append((switch_message, None))  # user_msg, None
+    return new_model, history
 
+###############################################################################
+# 8. The clear function
+###############################################################################
 def clear_all():
-    """
+    """Reset the history and the thinking text."""
     return [], ""
 
-
 ###############################################################################
-#
+# 9. Assembling the Gradio interface
 ###############################################################################
-
 with gr.Blocks() as demo:
-    gr.Markdown("## Chat
+    gr.Markdown("## Chat with Gemini (Thinking & Non-Thinking Models)")
+
+    # Keep the "previous model" so the switch can be detected
+    prev_model_state = gr.State("")
+    # Keep the history: [(user, assistant), ...]
+    history_state = gr.State([])
+    # Keep the thinking text (thinking model only)
+    thinking_store = gr.State("")
 
     with gr.Row():
         model_dropdown = gr.Dropdown(
             choices=AVAILABLE_MODELS,
-            value="gemini-2.0-flash-exp",
-            label="
+            value="gemini-2.0-flash-exp",
+            label="Choose a model"
         )
         clear_button = gr.Button("Очистить чат")
 
-
-
-
-
-
-
-
-
-    # The thinking pane (thinking-model only)
+    chatbot = gr.Chatbot(
+        label="Диалог с Gemini",
+        markdown=True  # enable Markdown support
+    )
+    user_input = gr.Textbox(
+        label="Ваш вопрос",
+        placeholder="Введите текст...",
+    )
     thinking_output = gr.Textbox(
-        label="Размышления",
+        label="Размышления (если модель думающая)",
         interactive=False
     )
-
     send_btn = gr.Button("Отправить")
 
-    #
+    # --- Handle model switching ---
+    model_dropdown.change(
+        fn=switch_model,
+        inputs=[prev_model_state, model_dropdown, history_state],
+        outputs=[prev_model_state, history_state],
+        queue=False
+    ).then(
+        # After the model change, refresh the chat (to show the switch message)
+        fn=lambda h: h,
+        inputs=[history_state],
+        outputs=[chatbot],
+        queue=False
+    )
+
+    # --- On clicking the "Отправить" (send) button ---
     send_chain = send_btn.click(
         fn=user_send_message,
         inputs=[user_input, history_state, model_dropdown, thinking_store],
         outputs=[history_state, thinking_store],
         queue=True
     )
+    # Refresh the chat
     send_chain.then(
         fn=lambda h: h,
         inputs=[history_state],
         outputs=[chatbot],
         queue=True
     )
+    # Refresh the thinking pane
     send_chain.then(
         fn=lambda t: t,
         inputs=[thinking_store],
         outputs=[thinking_output],
         queue=True
     )
+    # Clear the input box
+    send_chain.then(
+        fn=lambda: "",
+        inputs=[],
+        outputs=[user_input],
+        queue=True
+    )
 
-    # On pressing Enter
+    # --- On pressing Enter in the input box ---
     submit_chain = user_input.submit(
         fn=user_send_message,
         inputs=[user_input, history_state, model_dropdown, thinking_store],
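The wiring in this hunk relies on Gradio event chaining: `.click()`, `.submit()` and `.change()` return a dependency object, and each `.then()` step runs after the previous one finishes. A self-contained toy version of the same pattern, with illustrative names only:

import gradio as gr

def echo(msg, history):
    # Toy handler standing in for user_send_message.
    return history + [(msg, f"echo: {msg}")]

with gr.Blocks() as toy:
    state = gr.State([])
    chat = gr.Chatbot()
    box = gr.Textbox()
    chain = box.submit(fn=echo, inputs=[box, state], outputs=[state])
    chain.then(fn=lambda h: h, inputs=[state], outputs=[chat])  # refresh the chat
    chain.then(fn=lambda: "", inputs=[], outputs=[box])         # clear the input box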
@@ -264,8 +319,15 @@ with gr.Blocks() as demo:
         outputs=[thinking_output],
         queue=True
     )
+    # Clear the input box
+    submit_chain.then(
+        fn=lambda: "",
+        inputs=[],
+        outputs=[user_input],
+        queue=True
+    )
 
-    #
+    # --- The "Очистить" (clear) button ---
     clear_chain = clear_button.click(
         fn=clear_all,
         inputs=[],
@@ -282,6 +344,11 @@ with gr.Blocks() as demo:
         inputs=[],
         outputs=[thinking_output]
     )
+    clear_chain.then(
+        fn=lambda: "",
+        inputs=[],
+        outputs=[user_input]
+    )
 
 if __name__ == "__main__":
     demo.launch()
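With this commit the key is read from the environment rather than hard-coded. One illustrative way to supply it for a local run (the placeholder is not a real key; any secret store works as well):

import os
import subprocess

# Set the variable before app.py calls genai.configure(...) at import time.
os.environ.setdefault("GEMINI_API_KEY", "<your-key>")
subprocess.run(["python", "app.py"], check=True)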