import gradio as gr
import google.generativeai as genai
import os
# Read the API key from the environment rather than hardcoding it
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    print("Error: GEMINI_API_KEY environment variable not set.")
    exit()
genai.configure(api_key=GEMINI_API_KEY)
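
# A minimal launch example (assumes a valid key exported in the shell):
#   export GEMINI_API_KEY="your-key-here"
#   python app.py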
AVAILABLE_MODELS = ["gemini-1.5-flash", "gemini-1.5-pro", "gemini-2.0-flash-thinking-exp"]
# Initialize the models once at application startup
MODELS = {model_name: genai.GenerativeModel(model_name=model_name) for model_name in AVAILABLE_MODELS}
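
# Gradio's "messages" history is a list of {"role", "content"} dicts, while
# google.generativeai's start_chat() expects [{"role": "user"|"model", "parts": [...]}].
# to_genai_history is a small bridging helper added here (the name is ours,
# not from the original code) to convert between the two formats.
def to_genai_history(history):
    genai_history = []
    for msg in history or []:
        role = "model" if msg.get("role") == "assistant" else "user"
        content = msg.get("content") or ""
        if isinstance(content, str) and content:
            genai_history.append({"role": role, "parts": [content]})
    return genai_history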
async def respond(message, history, selected_model):
    model = MODELS.get(selected_model)
    if not model:
        yield {"role": "assistant", "content": "Error: Selected model not available."}
        return
    try:
        # Convert Gradio's messages-format history to the format start_chat() expects
        chat = model.start_chat(history=to_genai_history(history))
        response_stream = chat.send_message(message, stream=True)
        full_response = ""
        for chunk in response_stream:
            full_response += (chunk.text or "")
            # Yield the growing message so the UI streams the reply token by token
            yield {"role": "assistant", "content": full_response}
    except Exception as e:
        yield {"role": "assistant", "content": f"Error during API call: {e}"}
async def respond_thinking(message, history, selected_model):
    if "thinking" not in selected_model:
        yield {"role": "assistant", "content": "A thinking model is not selected."}, ""
        return
    model = MODELS.get(selected_model)
    if not model:
        yield {"role": "assistant", "content": "Error: Selected model not available."}, ""
        return
    # Always yield a (chat message, thinking text) tuple so the outputs line up
    yield {"role": "assistant", "content": "Thinking..."}, ""
    try:
        response = model.generate_content(message)
        thinking_process_text = ""
        model_response_text = ""
        if response.candidates:
            for part in response.candidates[0].content.parts:
                # Experimental thinking models may mark reasoning parts with a `thought` flag
                if getattr(part, "thought", False):
                    thinking_process_text += f"Model Thought:\n{part.text}\n\n"
                else:
                    model_response_text += (part.text or "")
        yield {"role": "assistant", "content": model_response_text}, thinking_process_text
    except Exception as e:
        yield {"role": "assistant", "content": f"Error during API call: {e}"}, f"Error during API call: {e}"
async def process_message(message, history, model_name):
    # Dispatch to the thinking handler for thinking models; both branches
    # yield (chat message, thinking text) tuples.
    if "thinking" in model_name:
        async for response_thinking in respond_thinking(message, history, model_name):
            yield response_thinking
    else:
        async for response in respond(message, history, model_name):
            yield response, ""
def clear_thinking():
    return ""
with gr.Blocks() as demo:
    gr.Markdown("# Gemini Chatbot with a thinking mode")
    with gr.Row():
        model_selection = gr.Dropdown(
            AVAILABLE_MODELS, value="gemini-1.5-flash", label="Select a Gemini model"
        )
    thinking_output = gr.Code(label="Thinking process (for thinking models)")
    chatbot = gr.ChatInterface(
        process_message,  # message handler; the model name arrives via additional_inputs
        additional_inputs=[model_selection],
        title="Gemini Chat",
        description="Chat with Google's Gemini models.",
        type="messages",
        # additional_outputs requires a recent Gradio release; it routes the second
        # element of each yielded tuple into the thinking panel
        additional_outputs=[thinking_output],
    )
    with gr.Row():
        clear_button = gr.Button("Clear")
    # No change handler is needed on model_selection: the dropdown value is passed
    # to process_message on every turn through additional_inputs.
    # ChatInterface exposes its inner Chatbot component as .chatbot
    clear_button.click(lambda: None, None, chatbot.chatbot, queue=False)
    clear_button.click(clear_thinking, outputs=[thinking_output], queue=False)
if __name__ == "__main__":
    demo.launch()