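"""Gradio chat app for Google's Gemini models.

A dropdown selects the model; experimental "thinking" models additionally
expose their intermediate reasoning in a separate code pane.
"""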
import gradio as gr
import google.generativeai as genai
import os
# Read the API key from the environment; never hardcode secrets in source.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    print("Error: GEMINI_API_KEY environment variable not set.")
    exit()

genai.configure(api_key=GEMINI_API_KEY)
AVAILABLE_MODELS = ["gemini-1.5-flash", "gemini-1.5-pro", "gemini-2.0-flash-thinking-exp"]

# Instantiate each model once at application startup.
MODELS = {model_name: genai.GenerativeModel(model_name=model_name) for model_name in AVAILABLE_MODELS}
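# "gemini-2.0-flash-thinking-exp" is an experimental preview model; preview
# model names and their availability can change between API releases.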
async def respond(message, history, selected_model):
    model = MODELS.get(selected_model)
    if not model:
        yield "Error: Selected model not available.", ""
        return

    # Gradio delivers history as [user, bot] pairs, while the genai SDK
    # expects {"role", "parts"} dicts, so convert before opening the chat.
    genai_history = []
    for user_msg, bot_msg in history or []:
        genai_history.append({"role": "user", "parts": [user_msg]})
        if bot_msg:
            genai_history.append({"role": "model", "parts": [bot_msg]})

    try:
        chat = model.start_chat(history=genai_history)
        response_stream = chat.send_message(message, stream=True)
        full_response = ""
        for chunk in response_stream:
            full_response += (chunk.text or "")
            yield full_response, ""  # Empty string for the thinking output
    except Exception as e:
        yield f"Error during API call: {e}", ""
async def respond_thinking(message, history, selected_model):
    if "thinking" not in selected_model:
        yield "Error: the selected model is not a thinking model.", ""
        return

    model = MODELS.get(selected_model)
    if not model:
        yield "Error: Selected model not available.", ""
        return

    yield "", "Thinking..."  # Signal that reasoning has started
    try:
        # Thinking responses are generated single-turn; history is not forwarded.
        response = model.generate_content(message)
        thinking_process_text = ""
        model_response_text = ""
        if response.candidates:
            for part in response.candidates[0].content.parts:
                # Experimental thinking models mark reasoning parts with a
                # truthy `thought` attribute; every other part is the answer.
                if getattr(part, "thought", False):
                    thinking_process_text += f"Model Thought:\n{part.text}\n\n"
                else:
                    model_response_text += (part.text or "")
        yield model_response_text, thinking_process_text
    except Exception as e:
        yield f"Error during API call: {e}", f"Error during API call: {e}"
def update_chatbot_function(model_name):
    """Return the handler that matches the selected model."""
    if "thinking" in model_name:
        return respond_thinking
    return respond
with gr.Blocks() as demo:
    gr.Markdown("# Gemini Chatbot with Thinking Mode")
    gr.Markdown("Chat with Google's Gemini models.")

    with gr.Row():
        model_selection = gr.Dropdown(
            AVAILABLE_MODELS, value="gemini-1.5-flash", label="Select a Gemini model"
        )
    # gr.ChatInterface hides its internal components, so the chat UI is built
    # from explicit components that the custom handlers below can target.
    chatbot = gr.Chatbot(label="Gemini Chat")
    thinking_output = gr.Code(label="Thinking process (for thinking models)")
    message_box = gr.Textbox(label="Message", placeholder="Type a message and press Enter")
    async def process_message(message, history, model_name):
        # Dispatch to the handler that matches the selected model and
        # stream its (response, thinking) pairs into the chat window.
        handler = update_chatbot_function(model_name)
        history = history or []
        async for response, thinking in handler(message, history, model_name):
            yield history + [(message, response)], thinking
    message_box.submit(
        process_message,
        inputs=[message_box, chatbot, model_selection],
        outputs=[chatbot, thinking_output],
        scroll_to_output=True,
    )

    # Clear the thinking pane whenever the draft message changes.
    message_box.change(
        lambda: "",
        inputs=[],
        outputs=[thinking_output],
    )
if __name__ == "__main__":
    demo.launch()
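# To run locally (assuming this file is saved as app.py):
#   GEMINI_API_KEY="your-key" python app.py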