import gradio as gr
import google.generativeai as genai
import os
# Read the API key from the environment; never hard-code secrets in source.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    print("Error: GEMINI_API_KEY environment variable not set.")
    exit()
genai.configure(api_key=GEMINI_API_KEY)
AVAILABLE_MODELS = ["gemini-1.5-flash", "gemini-1.5-pro", "gemini-2.0-flash-thinking-exp"]

# Initialize each model once at application startup.
MODELS = {model_name: genai.GenerativeModel(model_name=model_name) for model_name in AVAILABLE_MODELS}
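
# Minimal helper (a sketch, assuming plain-text message contents): Gradio's
# type="messages" history uses {"role": "user"/"assistant", "content": ...}
# dicts, while google.generativeai's start_chat() expects
# {"role": "user"/"model", "parts": [...]} entries.
def to_genai_history(history):
    return [
        {"role": "model" if m["role"] == "assistant" else "user", "parts": [m["content"]]}
        for m in history
        if isinstance(m.get("content"), str)  # skip non-text entries such as files
    ]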
async def respond(message, history, selected_model):
    model = MODELS.get(selected_model)
    if not model:
        yield {"role": "assistant", "content": "Error: Selected model not available."}
        return
    try:
        # Convert Gradio's message history into the format the genai SDK expects.
        chat = model.start_chat(history=to_genai_history(history))
        response_stream = chat.send_message(message, stream=True)
        full_response = ""
        for chunk in response_stream:
            full_response += (chunk.text or "")
            yield {"role": "assistant", "content": full_response}
    except Exception as e:
        yield {"role": "assistant", "content": f"Error during API call: {e}"}
async def respond_thinking(message, history, selected_model):
    if "thinking" not in selected_model:
        yield {"role": "assistant", "content": "Error: the selected model is not a thinking model."}, ""
        return
    model = MODELS.get(selected_model)
    if not model:
        yield {"role": "assistant", "content": "Error: Selected model not available."}, ""
        return
    # Yield a placeholder tuple so the caller can always unpack (response, thinking).
    yield {"role": "assistant", "content": "Thinking..."}, ""
    try:
        response = model.generate_content(message)
        thinking_process_text = ""
        model_response_text = ""
        if response.candidates:
            for part in response.candidates[0].content.parts:
                # Experimental thinking models mark reasoning parts with a `thought` flag.
                if getattr(part, "thought", False):
                    thinking_process_text += f"Model Thought:\n{part.text}\n\n"
                else:
                    model_response_text += (part.text or "")
        yield {"role": "assistant", "content": model_response_text}, thinking_process_text
    except Exception as e:
        yield {"role": "assistant", "content": f"Error during API call: {e}"}, f"Error during API call: {e}"
async def process_message(message, history, model_name):
    # Always yield a (chat message, thinking text) pair so the thinking pane
    # wired up via additional_outputs stays in sync with the chat.
    if "thinking" in model_name:
        async for response, thinking in respond_thinking(message, history, model_name):
            yield response, thinking
    else:
        async for response in respond(message, history, model_name):
            yield response, ""
def clear_thinking():
    return ""
with gr.Blocks() as demo:
    gr.Markdown("# Gemini Chatbot with Thinking Mode")
    with gr.Row():
        model_selection = gr.Dropdown(
            AVAILABLE_MODELS, value="gemini-1.5-flash", label="Select a Gemini model"
        )
    thinking_output = gr.Code(label="Thinking process (for thinking models)")
    chatbot = gr.ChatInterface(
        process_message,  # chat handler; receives (message, history, *additional_inputs)
        additional_inputs=[model_selection],
        additional_outputs=[thinking_output],  # assumes a recent Gradio release that supports additional_outputs
        title="Gemini Chat",
        description="Chat with Google's Gemini models.",
        type="messages",
    )
    with gr.Row():
        clear_button = gr.Button("Clear")
    # The dropdown value reaches process_message on every turn via additional_inputs,
    # and the thinking pane is refreshed via additional_outputs, so no extra event
    # wiring is needed for model changes or per-message updates.
    # ChatInterface exposes its underlying Chatbot component as .chatbot.
    clear_button.click(lambda: None, None, chatbot.chatbot, queue=False)
    clear_button.click(clear_thinking, outputs=[thinking_output], queue=False)
if __name__ == "__main__":
    demo.launch()
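
# A brief usage sketch (assumed environment; exact versions may differ):
#   pip install gradio google-generativeai
#   export GEMINI_API_KEY="your-key-here"
#   python app.py
# Note: additional_outputs on gr.ChatInterface requires a recent Gradio (5.x) release.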