Update app.py
app.py CHANGED
@@ -60,12 +60,6 @@ async def respond_thinking(message, history, selected_model):
     except Exception as e:
         yield {"role": "assistant", "content": f"Error during API call: {e}"}, f"Error during API call: {e}"
 
-def update_chatbot_function(model_name):
-    if "thinking" in model_name:
-        return respond_thinking
-    else:
-        return respond
-
 async def process_message(message, history, model_name):
     if "thinking" in model_name:
         generator = respond_thinking(message, history, model_name)
@@ -77,6 +71,9 @@ async def process_message(message, history, model_name):
     async for response, _ in respond(message, history, model_name):
         yield response, ""
 
+def clear_thinking():
+    return ""
+
 with gr.Blocks() as demo:
     gr.Markdown("# Gemini Chatbot with thinking mode")
 
@@ -85,25 +82,19 @@ with gr.Blocks() as demo:
         AVAILABLE_MODELS, value="gemini-1.5-flash", label="Select a Gemini model"
     )
 
+    thinking_output = gr.Code(label="Thinking process (for thinking models)")
+
     chatbot = gr.ChatInterface(
         process_message,  # the message-handling function is passed here
         additional_inputs=[model_selection],
         title="Gemini Chat",
         description="Chat with Google's Gemini models.",
-        type="messages"
+        type="messages",
+        clear=lambda: ("", "")  # function to clear the chatbot and thinking_output
     )
 
-    thinking_output = gr.Code(label="Thinking process (for thinking models)")
-
-    def change_function(model_name):
-        # Update the message-handling function when the model changes
-        if "thinking" in model_name:
-            return respond_thinking
-        else:
-            return respond
-
     def change_chatbot(model_name):
-        return gr.ChatInterface.update(
+        return gr.ChatInterface.update()  # No need to change the processing function here
 
     model_selection.change(
         change_chatbot,
@@ -111,10 +102,5 @@ with gr.Blocks() as demo:
         outputs=[chatbot],
     )
 
-    def clear_thinking():
-        return ""
-
-    chatbot.clear(fn=clear_thinking, outputs=[thinking_output])
-
 if __name__ == "__main__":
     demo.launch()
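For context, the pattern this commit settles on is a single dispatcher passed to `gr.ChatInterface`: `process_message` picks the responder by inspecting the selected model name, so nothing has to swap the chatbot's callback at runtime. Below is a minimal, self-contained sketch of that pattern, not the app's actual code: the model list and both responder bodies are placeholders, and only stock Gradio APIs are assumed.

```python
import gradio as gr

# Hypothetical model list for illustration; the real app defines AVAILABLE_MODELS itself.
AVAILABLE_MODELS = ["gemini-1.5-flash", "gemini-2.0-flash-thinking-exp"]

async def respond(message, history, model_name):
    # Placeholder for the real Gemini call; like the app's version it yields
    # (reply, thinking) pairs, with an empty thinking string for plain models.
    yield f"[{model_name}] {message}", ""

async def respond_thinking(message, history, model_name):
    # Placeholder: a "thinking" model streams its reasoning alongside the reply.
    yield f"[{model_name}] {message}", "1. parse the question\n2. draft a reply"

async def process_message(message, history, model_name):
    # Single entry point for gr.ChatInterface: route on the model name instead
    # of swapping the chat function when the dropdown changes.
    fn = respond_thinking if "thinking" in model_name else respond
    async for reply, _thinking in fn(message, history, model_name):
        yield reply  # ChatInterface renders whatever the generator yields
```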
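The surrounding layout can then stay static, which is why the commit can drop `update_chatbot_function` and `change_function` entirely: with the dropdown passed through `additional_inputs`, the selected model reaches `process_message` on every turn. A hedged wiring sketch under the same assumptions follows; the `thinking_output` panel is shown but left unconnected, since routing the thinking text into it needs extra wiring this sketch does not cover.

```python
with gr.Blocks() as demo:
    gr.Markdown("# Gemini Chatbot with thinking mode")
    model_selection = gr.Dropdown(
        AVAILABLE_MODELS, value="gemini-1.5-flash", label="Select a Gemini model"
    )
    thinking_output = gr.Code(label="Thinking process (for thinking models)")
    gr.ChatInterface(
        process_message,  # the dispatcher sketched above
        additional_inputs=[model_selection],
        title="Gemini Chat",
        description="Chat with Google's Gemini models.",
        type="messages",
    )

if __name__ == "__main__":
    demo.launch()
```

Because the model choice is just another input rather than a reason to rebuild the interface, the `model_selection.change(...)` handler becomes unnecessary in this sketch.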