Spaces:
Sleeping
Sleeping
File size: 5,963 Bytes
0588a72 3ad05c5 d0d60f4 0588a72 995a0e5 0588a72 079c63d 0588a72 079c63d 0588a72 3ad05c5 0588a72 3ad05c5 d0d60f4 079c63d aa05415 0588a72 aa05415 3ad05c5 aa05415 3ad05c5 aa05415 d0d60f4 995a0e5 079c63d 0588a72 079c63d d0d60f4 079c63d 3ad05c5 079c63d d0d60f4 0588a72 079c63d 0588a72 3ad05c5 aa05415 0588a72 d0d60f4 aa05415 0588a72 079c63d 0588a72 3ad05c5 d0d60f4 3ad05c5 0588a72 aa05415 0588a72 d0d60f4 0588a72 d0d60f4 0588a72 d0d60f4 3ad05c5 0588a72 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 |
import gradio as gr
import requests
import re
import os
# Chat-completions API location and bearer token, injected via environment.
# NOTE(review): the "none" default is not a valid URL — presumably the Space
# always sets API_ENDPOINT; confirm before relying on the fallback.
API_ENDPOINT = os.getenv("API_ENDPOINT", "none")
API_TOKEN = os.getenv("API_TOKEN")
def get_ai_response(message, history):
    """Fetch an AI response from the API using the modern messages format.

    Args:
        message: The new user message text.
        history: Prior turns as a list of ``{"role", "content"}`` dicts.

    Returns:
        The assistant reply with ``<reasoning>`` tags converted to
        collapsible HTML, or an ``"Error: ..."`` string on any failure.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant."}
    ] + history + [{"role": "user", "content": message}]
    payload = {
        "model": "RekaAI/reka-flash-3",
        "messages": messages,
        "stream": False,
        "max_tokens": 1024,
        "temperature": 0.7,
    }
    headers = {
        "Authorization": f"Bearer {API_TOKEN}",
        "Content-Type": "application/json",
    }
    try:
        # BUG FIX: the original call had no timeout, so a stalled API would
        # hang this handler (and the UI) indefinitely; bound the wait.
        response = requests.post(
            API_ENDPOINT, headers=headers, json=payload, timeout=120
        )
        response.raise_for_status()
        raw_response = response.json()["choices"][0]["message"]["content"]
        # Convert reasoning tags to collapsible HTML before display.
        return convert_reasoning_to_collapsible(raw_response)
    except Exception as e:
        # Surface the failure in the chat rather than crashing the app.
        return f"Error: {str(e)}"
def convert_reasoning_to_collapsible(text):
    """Render <reasoning>...</reasoning> sections as collapsible <details> HTML."""

    def _as_details(match):
        # Wrap the (stripped) reasoning body in a collapsible section.
        inner = match.group(1).strip()
        return (
            '<details><summary><strong>See reasoning</strong></summary>'
            f'<div class="reasoning-content">{inner}</div></details>'
        )

    result = re.sub(r'<reasoning>(.*?)</reasoning>', _as_details, text,
                    flags=re.DOTALL)
    # Drop <sep>...</sep> spans entirely, then any orphan <sep> tags left over.
    result = re.sub(r'<sep>.*?</sep>', '', result, flags=re.DOTALL)
    for tag in ('<sep>', '</sep>'):
        result = result.replace(tag, '')
    return result
def chat_interface(message, history):
    """Run one chat turn: query the API and append the new pair to history."""
    history = history or []
    # Re-encode Gradio's (user, assistant) tuples as API message dicts,
    # stripping the HTML decoration from earlier assistant replies first.
    api_history = []
    for user_text, assistant_html in history:
        plain = re.sub(r'<details>.*?</details>', '', assistant_html,
                       flags=re.DOTALL)
        plain = re.sub(r'<[^>]*>', '', plain)
        api_history.extend([
            {"role": "user", "content": user_text},
            {"role": "assistant", "content": plain},
        ])
    reply = get_ai_response(message, api_history)
    # Append in the (user, assistant) tuple shape the Gradio chatbot expects.
    history.append((message, reply))
    return history
# Modern CSS for a clean UI: dark theme plus styling for the collapsible
# <details>/<summary> reasoning sections injected by the backend.
custom_css = """
body { background-color: #1a1a1a; color: #ffffff; font-family: 'Arial', sans-serif; }
#chatbot { height: 80vh; background-color: #2d2d2d; border: 1px solid #404040; border-radius: 8px; }
input, button { background-color: #333333; color: #ffffff; border: 1px solid #404040; border-radius: 5px; }
button:hover { background-color: #404040; }
details { background-color: #333333; padding: 10px; margin: 5px 0; border-radius: 5px; }
summary { cursor: pointer; color: #70a9e6; }
.reasoning-content { padding: 10px; margin-top: 5px; background-color: #404040; border-radius: 5px; }
"""
# Build the Gradio app
# Build the Gradio app: chat window, input row, state, and event wiring.
with gr.Blocks(css=custom_css, title="Reka Flash 3") as demo:
    with gr.Column():
        gr.Markdown("## Reka Flash 3")
        gr.Markdown("This assistant shows reasoning in collapsible sections.")
        chatbot = gr.Chatbot(elem_id="chatbot", render_markdown=False, bubble_full_width=True)
        with gr.Row():
            message = gr.Textbox(placeholder="Type your message...", show_label=False, container=False)
            submit_btn = gr.Button("Send", size="sm")
        clear_chat_btn = gr.Button("Clear Chat")

    # State management
    chat_state = gr.State([])  # Current chat history as (user, assistant) tuples

    # JavaScript that swaps each chatbot message's text content into innerHTML
    # so the <details> reasoning sections render as real HTML.
    js = """
    function() {
        // Add event listener for when new messages are added
        const observer = new MutationObserver(function(mutations) {
            mutations.forEach(function(mutation) {
                if (mutation.addedNodes.length) {
                    document.querySelectorAll('#chatbot .message:not(.processed)').forEach(msg => {
                        msg.classList.add('processed');
                        // Replace content with innerHTML to render HTML
                        const content = msg.querySelector('.content');
                        if (content) {
                            content.innerHTML = content.textContent;
                        }
                    });
                }
            });
        });
        // Start observing chatbot for changes
        const chatbot = document.getElementById('chatbot');
        if (chatbot) {
            observer.observe(chatbot, { childList: true, subtree: true });
        }
        return [];
    }
    """

    def _wire_send(event):
        """Shared tail for a send event: mirror state to the chatbot, then clear the input."""
        event.then(
            lambda history: history, chat_state, chatbot
        ).then(
            lambda: "", None, message  # Clear the input box
        )

    # Event handlers: button click and Enter key run the identical pipeline.
    _wire_send(submit_btn.click(chat_interface, [message, chat_state], [chat_state]))
    _wire_send(message.submit(chat_interface, [message, chat_state], [chat_state]))

    # BUG FIX: with two output components the callback must return two values;
    # the original `lambda: []` returned zero values and raised when clicked.
    clear_chat_btn.click(
        lambda: ([], []),
        None,
        [chat_state, chatbot]
    )

    # Load JavaScript for HTML rendering once on page load.
    demo.load(
        fn=lambda: None,
        inputs=None,
        outputs=None,
        js=js
    )

demo.launch()