Update app.py
Browse files
app.py
CHANGED
@@ -1,59 +1,29 @@
|
|
1 |
import gradio as gr
|
2 |
-
import time
|
3 |
-
import re
|
4 |
|
5 |
-
|
|
|
6 |
|
7 |
-
|
8 |
-
"""Formats the chat history for the API call."""
|
9 |
-
messages = [{"role": "system", "content": system_prompt}]
|
10 |
-
for item in chat_history:
|
11 |
-
messages.append({"role": "user", "content": item["user"]})
|
12 |
-
messages.append({"role": "assistant", "content": item.get("assistant", "")})
|
13 |
-
|
14 |
-
messages.append({"role": "user", "content": message})
|
15 |
-
return messages
|
16 |
-
|
17 |
-
def respond(message, chat_history, model, system_prompt, thinking_budget):
    """Stand-in for a real model API call — replace with an actual backend.

    Sleeps proportionally to *thinking_budget* to mimic network/processing
    latency, then returns a canned reply and a fixed thinking time.

    Returns:
        Tuple of (response text, thinking time in seconds).
    """
    simulated_latency = thinking_budget / 10  # budget units -> seconds
    time.sleep(simulated_latency)
    # Placeholder reply; swap in a real model call here.
    return f"Simulated response for: {message}", 1.0
|
24 |
-
|
25 |
-
def generate(message, history, model, system_prompt, thinking_budget):
    """Produce the assistant reply for *message* and record it in *history*.

    Returns the mutated history plus an empty string, which clears the
    message textbox in the UI.
    """
    formatted = chat_with_ai(message, history, system_prompt)
    reply, _elapsed = respond(message, formatted, model, system_prompt, thinking_budget)
    history.append({"user": message, "assistant": reply})
    return history, ""
|
31 |
-
|
32 |
-
# Default system prompt; shown in (and editable from) the UI textbox.
DEFAULT_SYSTEM_PROMPT = """
You are a helpful assistant in normal conversation.
When given a problem to solve, you are an expert problem-solving assistant.
Your task is to provide a detailed, step-by-step solution to a given question.
"""
|
38 |
|
|
|
39 |
# UI wiring for the simulated-chat demo.
# NOTE(review): several component definitions were lost in the garbled diff;
# `system_prompt`, `model`, `chatbot`, and `msg` are reconstructed from the
# `msg.submit(...)` call — confirm against the original file.
with gr.Blocks() as demo:
    gr.Markdown("# Custom Chat Interface")

    system_prompt = gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, lines=5, label="System Prompt")
    model = gr.Textbox(value="simulated-model", label="Model")
    chatbot = gr.Chatbot(label="Chat")
    thinking_budget = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Thinking Budget")
    msg = gr.Textbox(label="Message")

    # Clear chat function: resets both the transcript and the input box.
    def clear_chat():
        return [], ""

    # Pressing Enter in the textbox generates a reply and clears the box.
    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, thinking_budget], outputs=[chatbot, msg])

demo.launch()
|
|
|
1 |
import gradio as gr
|
|
|
|
|
2 |
|
3 |
+
# Load the model's inference interface from the Hugging Face Hub.
# NOTE(review): the Hub repo id needs its owner prefix —
# "models/Mixtral-8x7B-Instruct-v0.1" does not resolve; the model lives
# under the "mistralai" organization. Confirm against the Hub.
interface = gr.load("models/mistralai/Mixtral-8x7B-Instruct-v0.1")
|
5 |
|
6 |
+
# Define the default system prompt (pre-filled into the UI textbox).
DEFAULT_SYSTEM_PROMPT = """
You are a helpful assistant in normal conversation.
When given a problem to solve, you are an expert problem-solving assistant.
Your task is to provide a detailed, step-by-step solution to a given question.
"""
|
12 |
|
13 |
+
# Build the chat interface around the remotely loaded model.
with gr.Blocks() as demo:
    gr.Markdown("# Custom Chat Interface with AI")

    system_prompt = gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, lines=5, label="System Prompt")
    chatbot = gr.Chatbot(label="Chat")

    # Chat handler. gr.Chatbot expects a list of (user, assistant) pairs, so
    # returning the bare response string (as the original did) would not
    # render a transcript; thread the history through instead and clear the
    # input box on return.
    def chat(message, history, system_prompt):
        # NOTE(review): the loaded interface takes a single text input, so the
        # system prompt is prepended rather than passed as a list — confirm
        # against the deployed model's call signature.
        response = interface(f"{system_prompt}\n\n{message}")
        history = (history or []) + [(message, response)]
        return history, ""

    # Send messages through the text field.
    msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")
    msg.submit(chat, inputs=[msg, chatbot, system_prompt], outputs=[chatbot, msg])

demo.launch()
|