saneowl committed on
Commit
5ad9b18
·
verified ·
1 Parent(s): d0d60f4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -55
app.py CHANGED
@@ -1,9 +1,8 @@
1
- import gradio as gr
2
  import requests
3
  import re
4
  import os
5
 
6
-
7
  API_ENDPOINT = os.getenv("API_ENDPOINT", "none")
8
  API_TOKEN = os.getenv("API_TOKEN")
9
 
@@ -11,7 +10,18 @@ def get_ai_response(message, history):
11
  """Fetch AI response from the API using the modern messages format."""
12
  messages = [
13
  {"role": "system", "content": "You are a helpful assistant."}
14
- ] + history + [{"role": "user", "content": message}]
 
 
 
 
 
 
 
 
 
 
 
15
  payload = {
16
  "model": "RekaAI/reka-flash-3",
17
  "messages": messages,
@@ -27,8 +37,6 @@ def get_ai_response(message, history):
27
  response = requests.post(API_ENDPOINT, headers=headers, json=payload)
28
  response.raise_for_status()
29
  raw_response = response.json()["choices"][0]["message"]["content"]
30
-
31
- # Convert reasoning tags to collapsible HTML
32
  html_response = convert_reasoning_to_collapsible(raw_response)
33
  return html_response
34
  except Exception as e:
@@ -36,43 +44,31 @@ def get_ai_response(message, history):
36
 
37
  def convert_reasoning_to_collapsible(text):
38
  """Convert reasoning tags to collapsible HTML sections."""
39
- # Find all reasoning sections
40
  reasoning_pattern = re.compile(r'<reasoning>(.*?)</reasoning>', re.DOTALL)
41
-
42
- # Function to replace each reasoning section with collapsible HTML
43
  def replace_with_collapsible(match):
44
  reasoning_content = match.group(1).strip()
45
  return f'<details><summary><strong>See reasoning</strong></summary><div class="reasoning-content">{reasoning_content}</div></details>'
46
-
47
- # Replace reasoning tags with collapsible sections
48
  html_response = reasoning_pattern.sub(replace_with_collapsible, text)
49
-
50
- # Remove <sep> tags
51
  html_response = re.sub(r'<sep>.*?</sep>', '', html_response, flags=re.DOTALL)
52
  html_response = html_response.replace('<sep>', '').replace('</sep>', '')
53
-
54
  return html_response
55
 
56
- def chat_interface(message, history):
57
- """Handle chat interactions and update history."""
58
- if not history:
59
  history = []
60
-
61
- # Convert history to the format expected by the API
62
- api_history = []
63
- for user_msg, ai_msg in history:
64
- # Remove HTML tags for API history
65
- clean_ai_msg = re.sub(r'<details>.*?</details>', '', ai_msg, flags=re.DOTALL)
66
- clean_ai_msg = re.sub(r'<[^>]*>', '', clean_ai_msg)
67
-
68
- api_history.append({"role": "user", "content": user_msg})
69
- api_history.append({"role": "assistant", "content": clean_ai_msg})
70
-
71
- ai_response = get_ai_response(message, api_history)
72
-
73
- # Update history in the format expected by Gradio chatbot
74
- history.append((message, ai_response))
75
-
76
  return history
77
 
78
  # Modern CSS for a clean UI
@@ -86,7 +82,6 @@ summary { cursor: pointer; color: #70a9e6; }
86
  .reasoning-content { padding: 10px; margin-top: 5px; background-color: #404040; border-radius: 5px; }
87
  """
88
 
89
- # Build the Gradio app
90
  with gr.Blocks(css=custom_css, title="Reka Flash 3") as demo:
91
  with gr.Column():
92
  gr.Markdown("## Reka Flash 3")
@@ -94,22 +89,19 @@ with gr.Blocks(css=custom_css, title="Reka Flash 3") as demo:
94
  chatbot = gr.Chatbot(elem_id="chatbot", render_markdown=False, bubble_full_width=True)
95
  with gr.Row():
96
  message = gr.Textbox(placeholder="Type your message...", show_label=False, container=False)
97
- submit_btn = gr.Button("Send", size="sm")
 
98
  clear_chat_btn = gr.Button("Clear Chat")
99
 
100
- # State management
101
  chat_state = gr.State([]) # Current chat history
102
 
103
- # JavaScript for enabling HTML in chatbot
104
  js = """
105
  function() {
106
- // Add event listener for when new messages are added
107
  const observer = new MutationObserver(function(mutations) {
108
  mutations.forEach(function(mutation) {
109
  if (mutation.addedNodes.length) {
110
  document.querySelectorAll('#chatbot .message:not(.processed)').forEach(msg => {
111
  msg.classList.add('processed');
112
- // Replace content with innerHTML to render HTML
113
  const content = msg.querySelector('.content');
114
  if (content) {
115
  content.innerHTML = content.textContent;
@@ -118,54 +110,50 @@ with gr.Blocks(css=custom_css, title="Reka Flash 3") as demo:
118
  }
119
  });
120
  });
121
-
122
- // Start observing chatbot for changes
123
  const chatbot = document.getElementById('chatbot');
124
  if (chatbot) {
125
  observer.observe(chatbot, { childList: true, subtree: true });
126
  }
127
-
128
  return [];
129
  }
130
  """
131
 
132
- # Event handlers
133
  submit_btn.click(
134
- chat_interface,
135
  [message, chat_state],
136
- [chat_state]
137
  ).then(
138
- lambda history: history,
139
  chat_state,
140
- chatbot
141
  ).then(
142
  lambda: "", # Clear the input box
143
  None,
144
  message
145
  )
146
 
147
- # Message submit via Enter key
148
  message.submit(
149
- chat_interface,
150
  [message, chat_state],
151
- [chat_state]
152
  ).then(
153
- lambda history: history,
154
  chat_state,
155
- chatbot
156
  ).then(
157
- lambda: "", # Clear the input box
158
  None,
159
  message
160
  )
161
 
162
  clear_chat_btn.click(
163
- lambda: [],
164
  None,
165
  [chat_state, chatbot]
166
  )
167
 
168
- # Load JavaScript for HTML rendering
169
  demo.load(
170
  fn=lambda: None,
171
  inputs=None,
@@ -173,4 +161,4 @@ with gr.Blocks(css=custom_css, title="Reka Flash 3") as demo:
173
  js=js
174
  )
175
 
176
- demo.launch()
 
1
+ import gradio as gr
2
  import requests
3
  import re
4
  import os
5
 
 
6
  API_ENDPOINT = os.getenv("API_ENDPOINT", "none")
7
  API_TOKEN = os.getenv("API_TOKEN")
8
 
 
10
  """Fetch AI response from the API using the modern messages format."""
11
  messages = [
12
  {"role": "system", "content": "You are a helpful assistant."}
13
+ ]
14
+ # Build the API history using all prior complete pairs
15
+ for user_msg, ai_msg in history:
16
+ # Only include completed exchanges (skip "pending")
17
+ if ai_msg != "pending":
18
+ clean_ai_msg = re.sub(r'<details>.*?</details>', '', ai_msg, flags=re.DOTALL)
19
+ clean_ai_msg = re.sub(r'<[^>]*>', '', clean_ai_msg)
20
+ messages.append({"role": "user", "content": user_msg})
21
+ messages.append({"role": "assistant", "content": clean_ai_msg})
22
+ # Append the new user message for which we want a response
23
+ messages.append({"role": "user", "content": message})
24
+
25
  payload = {
26
  "model": "RekaAI/reka-flash-3",
27
  "messages": messages,
 
37
  response = requests.post(API_ENDPOINT, headers=headers, json=payload)
38
  response.raise_for_status()
39
  raw_response = response.json()["choices"][0]["message"]["content"]
 
 
40
  html_response = convert_reasoning_to_collapsible(raw_response)
41
  return html_response
42
  except Exception as e:
 
44
 
45
def convert_reasoning_to_collapsible(text):
    """Turn <reasoning>...</reasoning> spans into collapsible HTML sections.

    Each reasoning span becomes a <details> element; <sep>-delimited
    sections (and any stray <sep> tags) are stripped from the output.
    """

    def _as_details(match):
        # Wrap the trimmed reasoning body in a collapsible <details> element.
        body = match.group(1).strip()
        return f'<details><summary><strong>See reasoning</strong></summary><div class="reasoning-content">{body}</div></details>'

    # DOTALL so reasoning/sep spans may cover multiple lines.
    result = re.sub(r'<reasoning>(.*?)</reasoning>', _as_details, text, flags=re.DOTALL)
    # Paired <sep> sections are removed with their content; unmatched
    # tags are then stripped on their own.
    result = re.sub(r'<sep>.*?</sep>', '', result, flags=re.DOTALL)
    result = result.replace('<sep>', '').replace('</sep>', '')
    return result
55
 
56
def add_user_message(message, history):
    """Append the user's message paired with a 'pending' assistant placeholder.

    Args:
        message: Text the user just submitted.
        history: List of (user, assistant) tuples, or None/empty on first use.

    Returns:
        The updated history twice — one value for the chat_state State and
        one for the chatbot display, matching the two components declared in
        outputs=[chat_state, chatbot] by the click/submit wiring.
    """
    # Copy rather than mutate the gr.State list in place.
    history = list(history) if history else []
    history.append((message, "pending"))
    # Bug fix: the event handlers declare two outputs, so Gradio needs one
    # return value per output. Returning the bare list made Gradio unpack
    # the history itself across the two components.
    return history, history
62
+
63
def generate_response_from_history(history):
    """Replace the trailing 'pending' placeholder with the model's reply.

    Args:
        history: List of (user, assistant) tuples; the last entry is expected
            to pair the just-submitted user message with the "pending" marker
            added by add_user_message.

    Returns:
        The updated history twice — one value for the chat_state State and
        one for the chatbot display, matching the two components declared in
        outputs=[chat_state, chatbot] by the click/submit wiring.
    """
    if not history:
        # Nothing pending: still return one value per declared output.
        return history, history
    # The last user message is the one awaiting a reply; get_ai_response
    # ignores pairs whose assistant side is still "pending", so passing the
    # full history is safe.
    last_user_message = history[-1][0]
    ai_response = get_ai_response(last_user_message, history)
    history[-1] = (last_user_message, ai_response)
    # Bug fix: two outputs are declared, so return one value per output
    # instead of a single list for Gradio to unpack.
    return history, history
73
 
74
  # Modern CSS for a clean UI
 
82
  .reasoning-content { padding: 10px; margin-top: 5px; background-color: #404040; border-radius: 5px; }
83
  """
84
 
 
85
  with gr.Blocks(css=custom_css, title="Reka Flash 3") as demo:
86
  with gr.Column():
87
  gr.Markdown("## Reka Flash 3")
 
89
  chatbot = gr.Chatbot(elem_id="chatbot", render_markdown=False, bubble_full_width=True)
90
  with gr.Row():
91
  message = gr.Textbox(placeholder="Type your message...", show_label=False, container=False)
92
+ # Make the button larger by using size "lg"
93
+ submit_btn = gr.Button("Send", size="lg")
94
  clear_chat_btn = gr.Button("Clear Chat")
95
 
 
96
  chat_state = gr.State([]) # Current chat history
97
 
 
98
  js = """
99
  function() {
 
100
  const observer = new MutationObserver(function(mutations) {
101
  mutations.forEach(function(mutation) {
102
  if (mutation.addedNodes.length) {
103
  document.querySelectorAll('#chatbot .message:not(.processed)').forEach(msg => {
104
  msg.classList.add('processed');
 
105
  const content = msg.querySelector('.content');
106
  if (content) {
107
  content.innerHTML = content.textContent;
 
110
  }
111
  });
112
  });
 
 
113
  const chatbot = document.getElementById('chatbot');
114
  if (chatbot) {
115
  observer.observe(chatbot, { childList: true, subtree: true });
116
  }
 
117
  return [];
118
  }
119
  """
120
 
121
+ # First, add the user message immediately (with pending), then update it with the generated response.
122
  submit_btn.click(
123
+ add_user_message,
124
  [message, chat_state],
125
+ [chat_state, chatbot]
126
  ).then(
127
+ generate_response_from_history,
128
  chat_state,
129
+ [chat_state, chatbot]
130
  ).then(
131
  lambda: "", # Clear the input box
132
  None,
133
  message
134
  )
135
 
136
+ # Allow pressing Enter to submit
137
  message.submit(
138
+ add_user_message,
139
  [message, chat_state],
140
+ [chat_state, chatbot]
141
  ).then(
142
+ generate_response_from_history,
143
  chat_state,
144
+ [chat_state, chatbot]
145
  ).then(
146
+ lambda: "",
147
  None,
148
  message
149
  )
150
 
151
  clear_chat_btn.click(
152
+ lambda: ([], []),
153
  None,
154
  [chat_state, chatbot]
155
  )
156
 
 
157
  demo.load(
158
  fn=lambda: None,
159
  inputs=None,
 
161
  js=js
162
  )
163
 
164
+ demo.launch()