Update app.py
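Adds experimental Model Context Protocol (MCP) support: a small `MCPClient` wrapper plus a `text_to_audio()` helper that call a Kokoro TTS MCP server over SSE, a new "MCP Server Settings" panel in the UI (server URL, connection test, TTS toggle, speech speed), and plumbing for the new options through `respond()` and `bot()`.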
app.py CHANGED
@@ -5,6 +5,10 @@ import json
 import base64
 from PIL import Image
 import io
+import requests
+from mcp.client.sse import SSEServerParameters
+from mcp.jsonrpc.client import JsonRpcClient
+from mcp.client.base import ServerCapabilities
 
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
 print("Access token loaded.")
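A note on the new imports: `SSEServerParameters`, `JsonRpcClient`, and `ServerCapabilities` do not appear to exist in the published `mcp` Python SDK, so this file will likely fail at import time unless a custom package supplies them. For comparison, here is a minimal sketch of the same connect-and-list-tools flow against the documented (async) SDK API, assuming a recent `mcp` release; the URL is a placeholder:

```python
import asyncio
from mcp import ClientSession
from mcp.client.sse import sse_client

async def list_mcp_tools(url: str) -> list[str]:
    # Open an SSE transport, then run the MCP handshake over it
    async with sse_client(url) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.list_tools()
            return [tool.name for tool in result.tools]

# Placeholder URL; Gradio Spaces that enable MCP expose it under /gradio_api/mcp/sse
# print(asyncio.run(list_mcp_tools("https://example-kokoro-mcp.hf.space/gradio_api/mcp/sse")))
```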
@@ -39,69 +43,107 @@ def encode_image(image_path):
         print(f"Error encoding image: {e}")
         return None
 
-    [old lines 42-54 not captured in the export]
+# MCP Client class for handling MCP server connections
+class MCPClient:
+    def __init__(self, url):
+        self.url = url
+        self.client = None
+        self.capabilities = None
+        self.tools = None
+
+    def connect(self):
+        try:
+            # Connect to the MCP server using SSE
+            server_params = SSEServerParameters(url=self.url)
+            self.client = JsonRpcClient(server_params)
+            self.client.connect()
+
+            # Get server capabilities
+            self.capabilities = ServerCapabilities(self.client)
+
+            # List available tools
+            self.tools = self.capabilities.list_tools()
+            print(f"Connected to MCP Server. Available tools: {[tool.name for tool in self.tools]}")
+            return True
+        except Exception as e:
+            print(f"Error connecting to MCP server: {e}")
+            return False
+
+    def call_tool(self, tool_name, **kwargs):
+        if not self.client or not self.tools:
+            print("MCP client not initialized or no tools available")
+            return None
+
+        # Find the tool with the given name
+        tool = next((t for t in self.tools if t.name == tool_name), None)
+        if not tool:
+            print(f"Tool '{tool_name}' not found")
+            return None
+
+        try:
+            # Call the tool with the given arguments
+            result = self.client.call_method("tools/call", {"name": tool_name, "arguments": kwargs})
+            return result
+        except Exception as e:
+            print(f"Error calling tool '{tool_name}': {e}")
+            return None
+
+    def close(self):
+        if self.client:
+            try:
+                self.client.close()
+                print("MCP client connection closed")
+            except Exception as e:
+                print(f"Error closing MCP client connection: {e}")
+
+# Function to convert text to audio using Kokoro MCP server
+def text_to_audio(text, speed=1.0, mcp_url=None):
+    """Convert text to audio using Kokoro MCP server if available.
+
     Args:
-        [old lines 56-59 not captured in the export]
-        top_p (float): Top-p sampling parameter.
-        frequency_penalty (float): Penalty for frequent tokens.
-        provider (str): Inference provider (e.g., 'hf-inference').
-        model (str): Model identifier (e.g., 'meta-llama/Llama-3.2-11B-Vision-Instruct').
+        text (str): Text to convert to speech
+        speed (float): Speed multiplier for speech
+        mcp_url (str): URL of the Kokoro MCP server
+
     Returns:
-        [old line 66 not captured in the export]
+        tuple: (sample_rate, audio_array) or None if conversion fails
     """
-    # Initialize the Inference Client
-    client = InferenceClient(token=ACCESS_TOKEN, provider=provider)
-    print(f"Inference Client initialized with {provider} provider.")
-
-    # Prepare messages
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": message}
-    ]
-
-    # Prepare parameters
-    parameters = {
-        "max_tokens": max_tokens,
-        "temperature": temperature,
-        "top_p": top_p,
-        "frequency_penalty": frequency_penalty,
-    }
+    if not text or not mcp_url:
+        return None
 
     try:
-        #
-        [old lines 90-94 not captured in the export]
-        )
-        [old lines 96-98 not captured in the export]
-        return
-        [old lines 100-101 not captured in the export]
+        # Connect to MCP server
+        mcp_client = MCPClient(mcp_url)
+        if not mcp_client.connect():
+            return None
+
+        # Call the text_to_audio tool
+        result = mcp_client.call_tool("text_to_audio", text=text, speed=speed)
+        mcp_client.close()
+
+        if not result:
+            return None
+
+        # Process the result - convert base64 audio to numpy array
+        import numpy as np
+        import base64
+
+        # Assuming the result contains base64-encoded WAV data
+        audio_b64 = result
+        audio_data = base64.b64decode(audio_b64)
+
+        # Convert to numpy array - this is simplified and may need adjustment
+        # based on the actual output format from the Kokoro MCP server
+        import io
+        import soundfile as sf
+
+        audio_io = io.BytesIO(audio_data)
+        audio_array, sample_rate = sf.read(audio_io)
+
+        return (sample_rate, audio_array)
     except Exception as e:
-        print(f"Error
-        return
+        print(f"Error converting text to audio: {e}")
+        return None
 
 def respond(
     message,
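A related caveat: `text_to_audio()` treats the value returned by `call_tool` as a bare base64 string, but MCP `tools/call` responses normally wrap their payload in a list of content parts. If the `JsonRpcClient` hands back the raw JSON-RPC result, a defensive unwrap along these lines would be safer; the part layout shown is the generic MCP shape, not confirmed for the Kokoro server:

```python
def extract_audio_b64(result):
    """Best-effort extraction of a base64 payload from an MCP tool result."""
    if isinstance(result, str):
        return result  # already a bare base64 string
    if isinstance(result, dict):
        # Generic MCP result shape: {"content": [{"type": "text", "text": "<base64>"}, ...]}
        for part in result.get("content", []):
            if isinstance(part, dict) and part.get("type") == "text":
                return part.get("text")
    return None
```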
@@ -117,7 +159,10 @@ def respond(
     custom_api_key,
     custom_model,
     model_search_term,
-    selected_model
+    selected_model,
+    mcp_server_url=None,
+    tts_enabled=False,
+    tts_speed=1.0
 ):
     print(f"Received message: {message}")
     print(f"Received {len(image_files) if image_files else 0} images")
@@ -130,6 +175,8 @@ def respond(
     print(f"Selected model (custom_model): {custom_model}")
     print(f"Model search term: {model_search_term}")
     print(f"Selected model from radio: {selected_model}")
+    print(f"MCP Server URL: {mcp_server_url}")
+    print(f"TTS Enabled: {tts_enabled}")
 
     # Determine which token to use
     token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
@@ -274,6 +321,19 @@ def respond(
         yield response
 
     print("Completed response generation.")
+
+    # If TTS is enabled and we have a valid MCP server URL, convert response to audio
+    if tts_enabled and mcp_server_url and response:
+        try:
+            print(f"Converting response to audio using MCP server: {mcp_server_url}")
+            audio_data = text_to_audio(response, tts_speed, mcp_server_url)
+            if audio_data:
+                # Here we would need to handle returning both text and audio
+                # This would require modifying the Gradio interface to support this
+                print("Successfully converted text to audio")
+                # For now, we'll just return the text response
+        except Exception as e:
+            print(f"Error converting text to audio: {e}")
 
 # Function to validate provider selection based on BYOK
 def validate_provider(api_key, provider):
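The commit stops short of surfacing the generated audio; the comments above note the Gradio interface would need changes. One way to close that loop is a `gr.Audio` component, which accepts exactly the `(sample_rate, numpy_array)` tuple that `text_to_audio()` returns. A minimal sketch, with hypothetical component names, assuming `text_to_audio` from this file and a live Kokoro MCP URL:

```python
import gradio as gr

with gr.Blocks() as tts_demo:
    text_in = gr.Textbox(label="Text to speak")
    audio_out = gr.Audio(label="Spoken response")  # accepts a (sample_rate, np.ndarray) tuple

    def speak(text):
        # Placeholder endpoint; point this at a real Kokoro MCP Space
        return text_to_audio(text, speed=1.0,
                             mcp_url="https://example-kokoro-mcp.hf.space/gradio_api/mcp/sse")

    text_in.submit(speak, inputs=text_in, outputs=audio_out)
```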
@@ -281,6 +341,22 @@ def validate_provider(api_key, provider):
         return gr.update(value="hf-inference")
     return gr.update(value=provider)
 
+# Function to test MCP server connection
+def test_mcp_connection(mcp_url):
+    if not mcp_url or not mcp_url.strip():
+        return "Please enter an MCP server URL"
+
+    try:
+        mcp_client = MCPClient(mcp_url)
+        if mcp_client.connect():
+            tools = [tool.name for tool in mcp_client.tools]
+            mcp_client.close()
+            return f"Successfully connected to MCP server. Available tools: {', '.join(tools)}"
+        else:
+            return "Failed to connect to MCP server"
+    except Exception as e:
+        return f"Error connecting to MCP server: {str(e)}"
+
 # GRADIO UI
 with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
     # Create the chatbot component
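For a quick smoke test outside the UI, the new helper can be called directly; whether it succeeds depends on the target Space being live:

```python
# Placeholder URL from the settings panel below; expect the tool list on success
print(test_mcp_connection("https://fdaudens-kokoro-mcp.hf.space/gradio_api/mcp/sse"))
```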
@@ -303,8 +379,6 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         sources=["upload"]
     )
 
-    # Note: We're removing the separate submit button since MultimodalTextbox has its own
-
     # Create accordion for settings
     with gr.Accordion("Settings", open=False):
         # System message
@@ -402,6 +476,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         )
 
         # Featured models list
+        # Updated to include multimodal models
         models_list = [
            "meta-llama/Llama-3.2-11B-Vision-Instruct",
            "meta-llama/Llama-3.3-70B-Instruct",
@@ -431,37 +506,67 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
        featured_model_radio = gr.Radio(
            label="Select a model below",
            choices=models_list,
-            value="meta-llama/Llama-3.2-11B-Vision-Instruct",
+            value="meta-llama/Llama-3.2-11B-Vision-Instruct",  # Default to a multimodal model
            interactive=True
        )
 
        gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")
-
-        #
-        with gr.Accordion("MCP
+
+        # New Accordion for MCP Settings
+        with gr.Accordion("MCP Server Settings", open=False):
+            mcp_server_url = gr.Textbox(
+                value="",
+                label="MCP Server URL",
+                info="Enter the URL of an MCP server to connect to (e.g., https://example-kokoro-mcp.hf.space/gradio_api/mcp/sse)",
+                placeholder="https://fdaudens-kokoro-mcp.hf.space/gradio_api/mcp/sse"
+            )
+
+            test_connection_btn = gr.Button("Test Connection")
+            connection_status = gr.Textbox(
+                label="Connection Status",
+                interactive=False
+            )
+
+            tts_enabled = gr.Checkbox(
+                label="Enable Text-to-Speech",
+                value=False,
+                info="Convert AI responses to speech using the Kokoro TTS service"
+            )
+
+            tts_speed = gr.Slider(
+                minimum=0.5,
+                maximum=2.0,
+                value=1.0,
+                step=0.1,
+                label="Speech Speed"
+            )
+
            gr.Markdown("""
-            ### MCP Support
+            ### About MCP Support
 
-            This app
+            This app can connect to Model Context Protocol (MCP) servers to extend its capabilities.
 
-            [old line 447 not captured in the export]
+            For example, connecting to a Kokoro MCP server allows for text-to-speech conversion.
 
-            [old lines 449-453 not captured in the export]
-            }
-            }
-            }
-            ```
+            To use this feature:
+            1. Enter the MCP server URL
+            2. Test the connection
+            3. Enable the desired features (e.g., TTS)
+            4. Chat normally with the AI
 
-            [old line 459 not captured in the export]
+            Note: TTS functionality requires an active connection to a Kokoro MCP server.
            """)
 
    # Chat history state
    chat_history = gr.State([])
 
+    # Connect the test connection button
+    test_connection_btn.click(
+        fn=test_mcp_connection,
+        inputs=[mcp_server_url],
+        outputs=[connection_status]
+    )
+
    # Function to filter models
    def filter_models(search_term):
        print(f"Filtering models with search term: {search_term}")
@@ -476,6 +581,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
    # Function for the chat interface
    def user(user_message, history):
+        # Debug logging for troubleshooting
        print(f"User message received: {user_message}")
 
        # Skip if message is empty (no text and no files)
@@ -499,6 +605,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
            if files and len(files) > 0:
                # Add text message first if it exists
                if text_content:
+                    # Add a separate text message
                    print(f"Adding text message: {text_content}")
                    history.append([text_content, None])
 
@@ -506,6 +613,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
                for file_path in files:
                    if file_path and isinstance(file_path, str):
                        print(f"Adding image: {file_path}")
+                        # Add image as a separate message with no text
                        history.append([f"![Image]({file_path})", None])
 
            return history
@@ -516,11 +624,13 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
            return history
 
    # Define bot response function
-    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
+    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model, mcp_url, tts_on, tts_spd):
+        # Check if history is valid
        if not history or len(history) == 0:
            print("No history to process")
            return history
 
+        # Get the most recent message and detect if it's an image
        user_message = history[-1][0]
        print(f"Processing user message: {user_message}")
 
@@ -528,26 +638,33 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
        image_path = None
        text_content = user_message
 
+        # Check if this is an image message (marked with ![Image])
        if isinstance(user_message, str) and user_message.startswith("![Image]("):
            is_image = True
+            # Extract image path from markdown format ![Image](path)
            image_path = user_message.replace("![Image](", "").replace(")", "")
            print(f"Image detected: {image_path}")
-            text_content = ""
+            text_content = ""  # No text for image-only messages
 
+        # Look back for text context if this is an image
        text_context = ""
        if is_image and len(history) > 1:
+            # Use the previous message as context if it's text
            prev_message = history[-2][0]
            if isinstance(prev_message, str) and not prev_message.startswith("![Image]("):
                text_context = prev_message
                print(f"Using text context from previous message: {text_context}")
 
+        # Process message through respond function
        history[-1][1] = ""
 
+        # Use either the image or text for the API
        if is_image:
+            # For image messages
            for response in respond(
-                text_context,
-                [image_path],
-                history[:-1],
+                text_context,  # Text context from previous message if any
+                [image_path],  # Current image
+                history[:-1],  # Previous history
                system_msg,
                max_tokens,
                temperature,
@@ -558,15 +675,19 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
                api_key,
                custom_model,
                search_term,
-                selected_model
+                selected_model,
+                mcp_url,
+                tts_on,
+                tts_spd
            ):
                history[-1][1] = response
                yield history
        else:
+            # For text-only messages
            for response in respond(
-                text_content,
-                None,
-                history[:-1],
+                text_content,  # Text message
+                None,          # No image
+                history[:-1],  # Previous history
                system_msg,
                max_tokens,
                temperature,
@@ -577,12 +698,15 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
                api_key,
                custom_model,
                search_term,
-                selected_model
+                selected_model,
+                mcp_url,
+                tts_on,
+                tts_spd
            ):
                history[-1][1] = response
                yield history
 
-    # Event handlers
+    # Event handlers - only using the MultimodalTextbox's built-in submit functionality
    msg.submit(
        user,
        [msg, chatbot],
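The image handling above hinges on a plain-string markdown marker, `![Image](path)`, that `user()` stores for each uploaded file. A quick round-trip of the format `bot()` relies on:

```python
path = "/tmp/cat.png"  # example upload path
marker = f"![Image]({path})"

assert marker.startswith("![Image](")
assert marker.replace("![Image](", "").replace(")", "") == path
# Caveat: replacing every ")" would corrupt paths that themselves contain ")"
```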
@@ -592,14 +716,15 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
        bot,
        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
-         model_search_box, featured_model_radio],
+         model_search_box, featured_model_radio, mcp_server_url, tts_enabled, tts_speed],
        [chatbot]
    ).then(
-        lambda: {"text": "", "files": []},
+        lambda: {"text": "", "files": []},  # Clear inputs after submission
        None,
        [msg]
    )
 
+    # Connect the model filter to update the radio choices
    model_search_box.change(
        fn=filter_models,
        inputs=model_search_box,
@@ -607,13 +732,15 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
    )
    print("Model search box change event linked.")
 
+    # Connect the featured model radio to update the custom model box
    featured_model_radio.change(
        fn=set_custom_model_from_radio,
        inputs=featured_model_radio,
        outputs=custom_model_box
    )
    print("Featured model radio button change event linked.")
-
+
+    # Connect the BYOK textbox to validate provider selection
    byok_textbox.change(
        fn=validate_provider,
        inputs=[byok_textbox, provider_radio],
@@ -621,6 +748,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
    )
    print("BYOK textbox change event linked.")
 
+    # Also validate provider when the radio changes to ensure consistency
    provider_radio.change(
        fn=validate_provider,
        inputs=[byok_textbox, provider_radio],
@@ -632,4 +760,4 @@ print("Gradio interface initialized.")
 
 if __name__ == "__main__":
     print("Launching the demo application.")
-    demo.launch(show_api=True
+    demo.launch(show_api=True)
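Tangentially: recent Gradio releases can also expose an app as an MCP server themselves (with the `gradio[mcp]` extra installed; availability depends on the Gradio version pinned by the Space). If that were wanted here, the launch call would grow one flag:

```python
if __name__ == "__main__":
    print("Launching the demo application.")
    # mcp_server=True additionally publishes the app's own tools at /gradio_api/mcp/sse
    demo.launch(show_api=True, mcp_server=True)
```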