vortex123 committed (verified)
Commit bf0e3ce · 1 Parent(s): 797a6bb

Update app.py

Files changed (1):
  1. app.py: +32 -106
app.py CHANGED
@@ -1,132 +1,58 @@
 import gradio as gr
 import time
 import re
-import os
-
-MODELS = [
-    "Mixtral-8x7B-Instruct-v0.1"
-]
-
-
-# Sambanova API base URL
-API_BASE = "https://api.sambanova.ai/v1"
 
+MODELS = ["Mixtral-8x7B-Instruct-v0.1"]
 
 def chat_with_ai(message, chat_history, system_prompt):
     """Formats the chat history for the API call."""
     messages = [{"role": "system", "content": system_prompt}]
-    for tup in chat_history:
-        first_key = list(tup.keys())[0] # First key
-        last_key = list(tup.keys())[-1] # Last key
-        messages.append({"role": "user", "content": tup[first_key]})
-        messages.append({"role": "assistant", "content": tup[last_key]})
+    for item in chat_history:
+        messages.append({"role": "user", "content": item["user"]})
+        messages.append({"role": "assistant", "content": item.get("assistant", "")})
     messages.append({"role": "user", "content": message})
     return messages
 
-def respond(message, chat_history, model, system_prompt, thinking_budget, api_key):
-    """Sends the message to the API and gets the response."""
-
-    messages = chat_with_ai(message, chat_history, system_prompt.format(budget=thinking_budget))
-    start_time = time.time()
-
-    try:
-        completion = client.chat.completions.create(model=model, messages=messages)
-        response = completion.choices[0].message.content
-        thinking_time = time.time() - start_time
-        return response, thinking_time
-    except Exception as e:
-        error_message = f"Error: {str(e)}"
-        return error_message, time.time() - start_time
-
-def parse_response(response):
-    """Parses the response from the API."""
-    answer_match = re.search(r'<answer>(.*?)</answer>', response, re.DOTALL)
-    reflection_match = re.search(r'<reflection>(.*?)</reflection>', response, re.DOTALL)
+def respond(message, chat_history, model, system_prompt, thinking_budget):
+    """Simulate API call and get the response. Replace with actual API call."""
+    # Simulate a delay
+    time.sleep(1)
+    # Dummy response
+    response = f"Simulated response for: {message}"
+    return response, 1.0
 
-    answer = answer_match.group(1).strip() if answer_match else ""
-    reflection = reflection_match.group(1).strip() if reflection_match else ""
-    steps = re.findall(r'<step>(.*?)</step>', response, re.DOTALL)
-
-    if answer == "":
-        return response, "", ""
-
-    return answer, reflection, steps
-
-def generate(message, history, model, system_prompt, thinking_budget, api_key):
+def generate(message, history, model, system_prompt, thinking_budget):
     """Generates the chatbot response."""
-    response, thinking_time = respond(message, history, model, system_prompt, thinking_budget, api_key)
-
-    if response.startswith("Error:"):
-        return history + [({"role": "system", "content": response},)], ""
-
-    answer, reflection, steps = parse_response(response)
-
-    messages = []
-    messages.append({"role": "user", "content": message})
-
-    formatted_steps = [f"Step {i}: {step}" for i, step in enumerate(steps, 1)]
-    all_steps = "\n".join(formatted_steps) + f"\n\nReflection: {reflection}"
-
-    messages.append({"role": "assistant", "content": all_steps, "metadata": {"title": f"Thinking Time: {thinking_time:.2f} sec"}})
-    messages.append({"role": "assistant", "content": answer})
-
-    return history + messages, ""
+    response, thinking_time = respond(message, history, model, system_prompt, thinking_budget)
+    history.append({"user": message, "assistant": response})
+    return history, ""
 
 # Define the default system prompt
 DEFAULT_SYSTEM_PROMPT = """
 You are a helpful assistant in normal conversation.
-When given a problem to solve, you are an expert problem-solving assistant.
-Your task is to provide a detailed, step-by-step solution to a given question.
-Follow these instructions carefully:
-1. Read the given question carefully and reset counter between <count> and </count> to {budget}
-2. Generate a detailed, logical step-by-step solution.
-3. Enclose each step of your solution within <step> and </step> tags.
-4. You are allowed to use at most {budget} steps (starting budget),
-keep track of it by counting down within tags <count> </count>,
-STOP GENERATING MORE STEPS when hitting 0, you don't have to use all of them.
-5. Do a self-reflection when you are unsure about how to proceed,
-based on the self-reflection and reward, decides whether you need to return
-to the previous steps.
-6. After completing the solution steps, reorganize and synthesize the steps
-into the final answer within <answer> and </answer> tags.
-7. Provide a critical, honest and subjective self-evaluation of your reasoning
-process within <reflection> and </reflection> tags.
-8. Assign a quality score to your solution as a float between 0.0 (lowest
-quality) and 1.0 (highest quality), enclosed in <reward> and </reward> tags.
-Example format:
-<count> [starting budget] </count>
-<step> [Content of step 1] </step>
-<count> [remaining budget] </count>
-<step> [Content of step 2] </step>
-<reflection> [Evaluation of the steps so far] </reflection>
-<reward> [Float between 0.0 and 1.0] </reward>
-<count> [remaining budget] </count>
-<step> [Content of step 3 or Content of some previous step] </step>
-<count> [remaining budget] </count>
+When given a problem to solve, you are an expert problem-solving assistant.
+Your task is to provide a detailed, step-by-step solution to a given question.
 ...
-<step> [Content of final step] </step>
-<count> [remaining budget] </count>
-<answer> [Final Answer] </answer> (must give final answer in this format)
-<reflection> [Evaluation of the solution] </reflection>
-<reward> [Float between 0.0 and 1.0] </reward>
 """
 
 with gr.Blocks() as demo:
-    gr.Markdown("# Llama3.1-Instruct-O1")
-    gr.Markdown("[Powered by SambaNova Cloud, Get Your API Key Here](https://cloud.sambanova.ai/apis)")
-
+    gr.Markdown("# Custom Chat Interface")
+
     with gr.Row():
         model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0])
-        thinking_budget = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Thinking Budget", info="maximum times a model can think")
-
-    chatbot = gr.Chatbot(label="Chat", show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", type="messages")
-
+        thinking_budget = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Thinking Budget")
+
+    system_prompt = gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, lines=15, label="System Prompt")
+    chatbot = gr.Chatbot(label="Chat", type="messages")
     msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")
+
+    # Clear chat
+    def clear_chat():
+        return [], ""
+
+    gr.Button("Clear Chat").click(clear_chat, inputs=None, outputs=[chatbot, msg])
 
-    gr.Button("Clear Chat").click(lambda: ([], ""), inputs=None, outputs=[chatbot, msg])
-
-    system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=15, interactive=True)
-
-    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, thinking_budget, api_key], outputs=[chatbot, msg])
+    # Generate response on message submission
+    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt, thinking_budget], outputs=[chatbot, msg])
 
-demo.launch(share=True, show_api=False)
+demo.launch()
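
For reference, if the placeholder respond() in the new version is later wired back to a real endpoint, a minimal sketch along the lines of the removed code could look as follows. Assumptions are flagged explicitly: the OpenAI-compatible base URL comes from the removed lines, while the client construction and the SAMBANOVA_API_KEY environment-variable name are illustrative and not part of either version of app.py.

# Sketch only, not part of the commit: respond() backed by an OpenAI-compatible API,
# mirroring the removed implementation. The client construction and the
# SAMBANOVA_API_KEY variable name are assumptions for illustration.
import os
import time
from openai import OpenAI

API_BASE = "https://api.sambanova.ai/v1"  # base URL taken from the removed lines

client = OpenAI(base_url=API_BASE, api_key=os.environ.get("SAMBANOVA_API_KEY", ""))

def respond(message, chat_history, model, system_prompt, thinking_budget):
    """Send the formatted history to the API and return (response, elapsed_seconds)."""
    messages = chat_with_ai(message, chat_history, system_prompt)
    start_time = time.time()
    try:
        completion = client.chat.completions.create(model=model, messages=messages)
        return completion.choices[0].message.content, time.time() - start_time
    except Exception as e:
        return f"Error: {e}", time.time() - start_time

Note that gr.Chatbot(type="messages") hands the submit callback a list of {"role", "content"} dicts, so restoring a real backend would likely also mean reverting chat_with_ai() and generate() to the role-based history handling of the previous version rather than the {"user", "assistant"} pairs used here.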