"""Gradio chat UI for Google Gemini models, with an optional "thinking" panel.

Fixes relative to the previous revision:
- The API key was hard-coded in source; it is now read from the
  GEMINI_API_KEY environment variable, as the original comment claimed.
- Gradio "messages"-format history was passed verbatim to the
  google-generativeai SDK, which expects ``{'role': 'user'|'model',
  'parts': [...]}``; it is now converted.
- Blocking SDK calls are pushed off the event loop with asyncio.to_thread.
- The broken ``gr.ChatInterface.update`` wiring (that classmethod does not
  exist) was removed; the model dropdown is already read on every message
  via ``additional_inputs``.
- The thinking panel is now actually wired as an output of the chat
  function via ``additional_outputs`` (requires Gradio >= 5).
"""

import asyncio
import os

import google.generativeai as genai
import gradio as gr

# Read the secret from the environment instead of embedding it in source.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    print("Error: GEMINI_API_KEY environment variable not set.")
    raise SystemExit(1)

genai.configure(api_key=GEMINI_API_KEY)

AVAILABLE_MODELS = [
    "gemini-1.5-flash",
    "gemini-1.5-pro",
    "gemini-2.0-flash-thinking-exp",
]

# Instantiate each model once at startup; GenerativeModel objects hold no
# per-conversation state, so they are safe to share across requests.
MODELS = {name: genai.GenerativeModel(model_name=name) for name in AVAILABLE_MODELS}


def _to_genai_history(history):
    """Convert Gradio "messages" history to the google-generativeai format.

    Gradio (type="messages") supplies ``{'role': 'user'|'assistant',
    'content': str}``; the SDK's ``start_chat`` expects
    ``{'role': 'user'|'model', 'parts': [str]}``.
    """
    converted = []
    for msg in history or []:
        role = "model" if msg.get("role") == "assistant" else "user"
        converted.append({"role": role, "parts": [msg.get("content") or ""]})
    return converted


async def respond(message, history, selected_model):
    """Stream a reply from a regular model.

    Yields ``(assistant_message_dict, thinking_text)`` pairs; the thinking
    text is always empty for non-thinking models.
    """
    model = MODELS.get(selected_model)
    if not model:
        yield {"role": "assistant", "content": "Error: Selected model not available."}, ""
        return
    try:
        chat = model.start_chat(history=_to_genai_history(history))
        # send_message is a blocking network call; keep the event loop free.
        response_stream = await asyncio.to_thread(chat.send_message, message, stream=True)
        full_response = ""
        for chunk in response_stream:
            full_response += chunk.text or ""
            yield {"role": "assistant", "content": full_response}, ""
    except Exception as e:
        yield {"role": "assistant", "content": f"Error during API call: {e}"}, ""


async def respond_thinking(message, history, selected_model):
    """Answer with a "thinking" model, separating thoughts from the reply.

    Yields ``(assistant_message_dict, thinking_text)`` pairs; parts flagged
    with ``thought`` go into the thinking text, the rest into the reply.
    """
    if "thinking" not in selected_model:
        yield {"role": "assistant", "content": "Thinking model не выбрана."}, ""
        return
    model = MODELS.get(selected_model)
    if not model:
        yield {"role": "assistant", "content": "Error: Selected model not available."}, ""
        return

    # Let the user know the model is working before the (slow) call returns.
    yield {"role": "assistant", "content": "Думаю..."}, ""
    try:
        # generate_content blocks; run it in a worker thread.
        response = await asyncio.to_thread(model.generate_content, message)
        thinking_process_text = ""
        model_response_text = ""
        if response.candidates:
            for part in response.candidates[0].content.parts:
                if getattr(part, "thought", False):
                    thinking_process_text += f"Model Thought:\n{part.text}\n\n"
                else:
                    model_response_text += part.text or ""
        yield {"role": "assistant", "content": model_response_text}, thinking_process_text
    except Exception as e:
        yield {"role": "assistant", "content": f"Error during API call: {e}"}, f"Error during API call: {e}"


def update_chatbot_function(model_name):
    """Return the handler matching *model_name* (kept for compatibility)."""
    return respond_thinking if "thinking" in model_name else respond


async def process_message(message, history, model_name):
    """Dispatch a user message to the handler for the selected model."""
    handler = update_chatbot_function(model_name)
    async for response, thinking in handler(message, history, model_name):
        yield response, thinking


with gr.Blocks() as demo:
    gr.Markdown("# Gemini Chatbot с режимом размышления")
    with gr.Row():
        model_selection = gr.Dropdown(
            AVAILABLE_MODELS,
            value="gemini-1.5-flash",
            label="Выберите модель Gemini",
        )

    # Created before the ChatInterface so it can be registered as an
    # additional output of process_message.
    thinking_output = gr.Code(label="Процесс размышления (для моделей с размышлением)")

    chatbot = gr.ChatInterface(
        process_message,
        additional_inputs=[model_selection],
        # process_message yields (reply, thinking); route the second value
        # into the thinking panel (Gradio >= 5).
        additional_outputs=[thinking_output],
        title="Gemini Chat",
        description="Общайтесь с моделями Gemini от Google.",
        type="messages",
    )

    # NOTE(review): the previous `model_selection.change(...)` handler called
    # the non-existent `gr.ChatInterface.update`; it was removed. The dropdown
    # value is already passed to process_message on every turn.

    def clear_thinking():
        """Reset the thinking panel when the chat history is cleared."""
        return ""

    # Wire the clear event on the underlying Chatbot component.
    chatbot.chatbot.clear(clear_thinking, inputs=[], outputs=[thinking_output])


if __name__ == "__main__":
    demo.launch()