import os

import google.generativeai as genai
import gradio as gr

# Read the API key from the environment; never hard-code secrets in source.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    print("Error: GEMINI_API_KEY environment variable not set.")
    exit(1)

genai.configure(api_key=GEMINI_API_KEY)

AVAILABLE_MODELS = ["gemini-1.5-flash", "gemini-1.5-pro", "gemini-2.0-flash-thinking-exp"]
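# NB: available model IDs change over time; consult https://ai.google.dev for
# the names currently served before relying on this list.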

# Instantiate each model once at startup so every request can reuse it
MODELS = {model_name: genai.GenerativeModel(model_name=model_name) for model_name in AVAILABLE_MODELS}
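
def to_genai_history(history):
    """Convert Gradio's (user, bot) message pairs into the role/parts dicts
    that google.generativeai chat sessions expect.

    Note: this helper is added here because Gradio supplies plain string
    tuples, while start_chat() expects {"role": ..., "parts": [...]} entries.
    """
    genai_history = []
    for user_msg, bot_msg in history or []:
        if user_msg:
            genai_history.append({"role": "user", "parts": [user_msg]})
        if bot_msg:
            genai_history.append({"role": "model", "parts": [bot_msg]})
    return genai_history
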

async def respond(message, history, selected_model):
    model = MODELS.get(selected_model)
    if not model:
        yield "Error: Selected model not available.", ""
        return

    try:
        # Resume the conversation from the converted history and stream the reply.
        chat = model.start_chat(history=to_genai_history(history))
        response_stream = chat.send_message(message, stream=True)
        full_response = ""
        for chunk in response_stream:
            full_response += (chunk.text or "")
            yield full_response, ""  # Empty second value: no thinking output here
    except Exception as e:
        yield f"Error during API call: {e}", ""

async def respond_thinking(message, history, selected_model):
    if "thinking" not in selected_model:
        yield "Error: the selected model is not a thinking model.", ""
        return

    model = MODELS.get(selected_model)
    if not model:
        yield "Error: Selected model not available.", ""
        return

    yield "", "Thinking..."  # Placeholder shown while the model works

    try:
        # Note: generate_content() is single-turn here; the chat history is not passed.
        response = model.generate_content(message)
        thinking_process_text = ""
        model_response_text = ""

        if response.candidates:
            for part in response.candidates[0].content.parts:
                # Thinking models may mark reasoning parts with a `thought` flag.
                if getattr(part, "thought", False):
                    thinking_process_text += f"Model Thought:\n{part.text}\n\n"
                else:
                    model_response_text += (part.text or "")

        yield model_response_text, thinking_process_text
    except Exception as e:
        yield f"Error during API call: {e}", f"Error during API call: {e}"

def pick_responder(model_name):
    """Return the response generator that matches the selected model."""
    return respond_thinking if "thinking" in model_name else respond

with gr.Blocks() as demo:
    gr.Markdown("# Gemini Chatbot with a thinking mode")

    with gr.Row():
        model_selection = gr.Dropdown(
            AVAILABLE_MODELS, value="gemini-1.5-flash", label="Select a Gemini model"
        )

    # A plain Chatbot + Textbox pair gives explicit control over both outputs
    # (the chat log and the separate thinking panel).
    chatbot = gr.Chatbot(label="Gemini Chat")
    thinking_output = gr.Code(label="Thinking process (thinking models only)")
    msg = gr.Textbox(label="Message", placeholder="Chat with Google's Gemini models.")

    async def process_message(message, history, model_name):
        """Dispatch to the matching responder and stream (history, thinking) updates."""
        history = history or []
        responder = pick_responder(model_name)
        async for response, thinking in responder(message, history, model_name):
            yield history + [(message, response)], thinking

    msg.submit(
        process_message,
        inputs=[msg, chatbot, model_selection],
        outputs=[chatbot, thinking_output],
        scroll_to_output=True,
    ).then(lambda: "", None, [msg])  # Clear the textbox once the reply finishes

if __name__ == "__main__":
    demo.launch()
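
# Example usage, assuming `gradio` and `google-generativeai` are installed:
#   pip install gradio google-generativeai
#   export GEMINI_API_KEY="your-key-here"
#   python app.py  # or whatever this file is named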