Update app.py
app.py
CHANGED
@@ -1,763 +1,224 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
 import os
 import json
-import base64
-from PIL import Image
-import io
 import requests
-
-from
-from mcp.client.base import ServerCapabilities

-
-

-
-
-
-
-

-
-

-
-
-
-
-
-
-
-        # Convert to RGB if image has an alpha channel (RGBA)
-        if image.mode == 'RGBA':
-            image = image.convert('RGB')
-
-        # Encode to base64
-        buffered = io.BytesIO()
-        image.save(buffered, format="JPEG")
-        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        print("Image encoded successfully")
-        return img_str
-    except Exception as e:
-        print(f"Error encoding image: {e}")
-        return None
-
-# MCP Client class for handling MCP server connections
-class MCPClient:
-    def __init__(self, url):
-        self.url = url
-        self.client = None
-        self.capabilities = None
-        self.tools = None

-    def connect(self):
        try:
-            #
-
-
-
-
-
-

-
-
-
-
        except Exception as e:
-
            return False

-    def
-
-
-        return None

-
-
-
-
-

        try:
-            #
-
-
        except Exception as e:
-
-            return

    def close(self):
-
        try:
-
-
        except Exception as e:
-

-
-def text_to_audio(text, speed, mcp_url):
-    """

-    Args:
-        text (str): Text to convert to speech
-        speed (float): Speed multiplier for speech
-        mcp_url (str): URL of the Kokoro MCP server
-
    Returns:
-
    """
-    if not text or not mcp_url:
-        return None
-
    try:
-
-
-
-
-
-
-
-
-
-        if not result:
-            return None
-
-        # Process the result - convert base64 audio to numpy array
-        import numpy as np
-        import base64
-
-        # Assuming the result contains base64-encoded WAV data
-        audio_b64 = result
-        audio_data = base64.b64decode(audio_b64)
-
-        # Convert to numpy array - this is simplified and may need adjustment
-        # based on the actual output format from the Kokoro MCP server
-        import io
-        import soundfile as sf
-
-        audio_io = io.BytesIO(audio_data)
-        audio_array, sample_rate = sf.read(audio_io)
-
-        return (sample_rate, audio_array)
    except Exception as e:
-
-        return
-
-def respond(
-    message,
-    image_files,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    frequency_penalty,
-    seed,
-    provider,
-    custom_api_key,
-    custom_model,
-    model_search_term,
-    selected_model,
-    mcp_server_url=None,
-    tts_enabled=False,
-    tts_speed=1.0
-):
-    print(f"Received message: {message}")
-    print(f"Received {len(image_files) if image_files else 0} images")
-    print(f"History: {history}")
-    print(f"System message: {system_message}")
-    print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
-    print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
-    print(f"Selected provider: {provider}")
-    print(f"Custom API Key provided: {bool(custom_api_key.strip())}")
-    print(f"Selected model (custom_model): {custom_model}")
-    print(f"Model search term: {model_search_term}")
-    print(f"Selected model from radio: {selected_model}")
-    print(f"MCP Server URL: {mcp_server_url}")
-    print(f"TTS Enabled: {tts_enabled}")
-
-    # Determine which token to use
-    token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
-
-    if custom_api_key.strip() != "":
-        print("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
-    else:
-        print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
-
-    # Initialize the Inference Client with the provider and appropriate token
-    client = InferenceClient(token=token_to_use, provider=provider)
-    print(f"Hugging Face Inference Client initialized with {provider} provider.")
-
-    # Convert seed to None if -1 (meaning random)
-    if seed == -1:
-        seed = None
-
-    # Create multimodal content if images are present
-    if image_files and len(image_files) > 0:
-        # Process the user message to include images
-        user_content = []
-
-        # Add text part if there is any
-        if message and message.strip():
-            user_content.append({
-                "type": "text",
-                "text": message
-            })
-
-        # Add image parts
-        for img in image_files:
-            if img is not None:
-                # Get raw image data from path
-                try:
-                    encoded_image = encode_image(img)
-                    if encoded_image:
-                        user_content.append({
-                            "type": "image_url",
-                            "image_url": {
-                                "url": f"data:image/jpeg;base64,{encoded_image}"
-                            }
-                        })
-                except Exception as e:
-                    print(f"Error encoding image: {e}")
-    else:
-        # Text-only message
-        user_content = message
-
-    # Prepare messages in the format expected by the API
-    messages = [{"role": "system", "content": system_message}]
-    print("Initial messages array constructed.")

-    # Add conversation history to the context
-    for val in history:
-        user_part = val[0]
-        assistant_part = val[1]
-        if user_part:
-            # Handle both text-only and multimodal messages in history
-            if isinstance(user_part, tuple) and len(user_part) == 2:
-                # This is a multimodal message with text and images
-                history_content = []
-                if user_part[0]:  # Text
-                    history_content.append({
-                        "type": "text",
-                        "text": user_part[0]
-                    })
-
-                for img in user_part[1]:  # Images
-                    if img:
-                        try:
-                            encoded_img = encode_image(img)
-                            if encoded_img:
-                                history_content.append({
-                                    "type": "image_url",
-                                    "image_url": {
-                                        "url": f"data:image/jpeg;base64,{encoded_img}"
-                                    }
-                                })
-                        except Exception as e:
-                            print(f"Error encoding history image: {e}")
-
-                messages.append({"role": "user", "content": history_content})
-            else:
-                # Regular text message
-                messages.append({"role": "user", "content": user_part})
-            print(f"Added user message to context (type: {type(user_part)})")
-
-        if assistant_part:
-            messages.append({"role": "assistant", "content": assistant_part})
-            print(f"Added assistant message to context: {assistant_part}")

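For reference, the messages list built above follows the OpenAI-style chat format, where a multimodal user turn is a list of typed parts; a minimal sketch of one such turn (values illustrative):

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": [
            {"type": "text", "text": "Describe this image"},
            {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}},
        ]},
        {"role": "assistant", "content": "The image shows ..."},
    ]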
-
-
-
-
-    # Determine which model to use, prioritizing custom_model if provided
-    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
-    print(f"Model selected for inference: {model_to_use}")
-
-    # Start with an empty string to build the response as tokens stream in
-    response = ""
-    print(f"Sending request to {provider} provider.")
-
-    # Prepare parameters for the chat completion request
-    parameters = {
-        "max_tokens": max_tokens,
-        "temperature": temperature,
-        "top_p": top_p,
-        "frequency_penalty": frequency_penalty,
-    }

-
-
-
-    # Use the InferenceClient for making the request
-    try:
-        # Create a generator for the streaming response
-        stream = client.chat_completion(
-            model=model_to_use,
-            messages=messages,
-            stream=True,
-            **parameters
-        )
-
-        print("Received tokens: ", end="", flush=True)
-
-        # Process the streaming response
-        for chunk in stream:
-            if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
-                # Extract the content from the response
-                if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
-                    token_text = chunk.choices[0].delta.content
-                    if token_text:
-                        print(token_text, end="", flush=True)
-                        response += token_text
-                        yield response

-
-
-
-
-        yield response
-
-    print("Completed response generation.")

-
-
-
-        print(f"Converting response to audio using MCP server: {mcp_server_url}")
-        audio_data = text_to_audio(response, tts_speed, mcp_server_url)
-        if audio_data:
-            # Here we would need to handle returning both text and audio
-            # This would require modifying the Gradio interface to support this
-            print("Successfully converted text to audio")
-            # For now, we'll just return the text response
-    except Exception as e:
-        print(f"Error converting text to audio: {e}")
-
-# Function to validate provider selection based on BYOK
-def validate_provider(api_key, provider):
-    if not api_key.strip() and provider != "hf-inference":
-        return gr.update(value="hf-inference")
-    return gr.update(value=provider)
-
-# Function to test MCP server connection
-def test_mcp_connection(mcp_url):
-    if not mcp_url or not mcp_url.strip():
-        return "Please enter an MCP server URL"

-
-
-
-
-            mcp_client.close()
-            return f"Successfully connected to MCP server. Available tools: {', '.join(tools)}"
-        else:
-            return "Failed to connect to MCP server"
-    except Exception as e:
-        return f"Error connecting to MCP server: {str(e)}"
-
-# GRADIO UI
-with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
-    # Create the chatbot component
-    chatbot = gr.Chatbot(
-        height=600,
-        show_copy_button=True,
-        placeholder="Select a model and begin chatting. Now supports multiple inference providers and multimodal inputs",
-        layout="panel"
-    )
-    print("Chatbot interface created.")

-
-    msg = gr.MultimodalTextbox(
-        placeholder="Type a message or upload images...",
-        show_label=False,
-        container=False,
-        scale=12,
-        file_types=["image"],
-        file_count="multiple",
-        sources=["upload"]
-    )

-
-
-
-
-
-
-
-
-
-
-
-            with gr.Column():
-                max_tokens_slider = gr.Slider(
-                    minimum=1,
-                    maximum=4096,
-                    value=512,
-                    step=1,
-                    label="Max tokens"
-                )
-
-                temperature_slider = gr.Slider(
-                    minimum=0.1,
-                    maximum=4.0,
-                    value=0.7,
-                    step=0.1,
-                    label="Temperature"
-                )
-
-                top_p_slider = gr.Slider(
-                    minimum=0.1,
-                    maximum=1.0,
-                    value=0.95,
-                    step=0.05,
-                    label="Top-P"
-                )
-
-            with gr.Column():
-                frequency_penalty_slider = gr.Slider(
-                    minimum=-2.0,
-                    maximum=2.0,
-                    value=0.0,
-                    step=0.1,
-                    label="Frequency Penalty"
-                )
-
-                seed_slider = gr.Slider(
-                    minimum=-1,
-                    maximum=65535,
-                    value=-1,
-                    step=1,
-                    label="Seed (-1 for random)"
-                )
-
-        # Provider selection
-        providers_list = [
-            "hf-inference",  # Default Hugging Face Inference
-            "cerebras",      # Cerebras provider
-            "together",      # Together AI
-            "sambanova",     # SambaNova
-            "novita",        # Novita AI
-            "cohere",        # Cohere
-            "fireworks-ai",  # Fireworks AI
-            "hyperbolic",    # Hyperbolic
-            "nebius",        # Nebius
-        ]
-
-        provider_radio = gr.Radio(
-            choices=providers_list,
-            value="hf-inference",
-            label="Inference Provider",
-        )
-
-        # New BYOK textbox
-        byok_textbox = gr.Textbox(
-            value="",
-            label="BYOK (Bring Your Own Key)",
-            info="Enter a custom Hugging Face API key here. When empty, only 'hf-inference' provider can be used.",
-            placeholder="Enter your Hugging Face API token",
-            type="password"  # Hide the API key for security
-        )
-
-        # Custom model box
-        custom_model_box = gr.Textbox(
-            value="",
-            label="Custom Model",
-            info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
-            placeholder="meta-llama/Llama-3.3-70B-Instruct"
-        )
-
-        # Model search
-        model_search_box = gr.Textbox(
-            label="Filter Models",
-            placeholder="Search for a featured model...",
-            lines=1
-        )
-
-        # Featured models list
-        # Updated to include multimodal models
-        models_list = [
-            "meta-llama/Llama-3.2-11B-Vision-Instruct",
-            "meta-llama/Llama-3.3-70B-Instruct",
-            "meta-llama/Llama-3.1-70B-Instruct",
-            "meta-llama/Llama-3.0-70B-Instruct",
-            "meta-llama/Llama-3.2-3B-Instruct",
-            "meta-llama/Llama-3.2-1B-Instruct",
-            "meta-llama/Llama-3.1-8B-Instruct",
-            "NousResearch/Hermes-3-Llama-3.1-8B",
-            "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-            "mistralai/Mistral-Nemo-Instruct-2407",
-            "mistralai/Mixtral-8x7B-Instruct-v0.1",
-            "mistralai/Mistral-7B-Instruct-v0.3",
-            "mistralai/Mistral-7B-Instruct-v0.2",
-            "Qwen/Qwen3-235B-A22B",
-            "Qwen/Qwen3-32B",
-            "Qwen/Qwen2.5-72B-Instruct",
-            "Qwen/Qwen2.5-3B-Instruct",
-            "Qwen/Qwen2.5-0.5B-Instruct",
-            "Qwen/QwQ-32B",
-            "Qwen/Qwen2.5-Coder-32B-Instruct",
-            "microsoft/Phi-3.5-mini-instruct",
-            "microsoft/Phi-3-mini-128k-instruct",
-            "microsoft/Phi-3-mini-4k-instruct",
-        ]
-
-        featured_model_radio = gr.Radio(
-            label="Select a model below",
-            choices=models_list,
-            value="meta-llama/Llama-3.2-11B-Vision-Instruct",  # Default to a multimodal model
-            interactive=True
-        )
-
-        gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")
-
-        # New Accordion for MCP Settings
-        with gr.Accordion("MCP Server Settings", open=False):
-            mcp_server_url = gr.Textbox(
-                value="",
-                label="MCP Server URL",
-                info="Enter the URL of an MCP server to connect to (e.g., https://example-kokoro-mcp.hf.space/gradio_api/mcp/sse)",
-                placeholder="https://fdaudens-kokoro-mcp.hf.space/gradio_api/mcp/sse"
-            )
-
-            test_connection_btn = gr.Button("Test Connection")
-            connection_status = gr.Textbox(
-                label="Connection Status",
-                interactive=False
-            )
-
-            tts_enabled = gr.Checkbox(
-                label="Enable Text-to-Speech",
-                value=False,
-                info="Convert AI responses to speech using the Kokoro TTS service"
-            )
-
-            tts_speed = gr.Slider(
-                minimum=0.5,
-                maximum=2.0,
-                value=1.0,
-                step=0.1,
-                label="Speech Speed"
            )

-
-
-    gr.Markdown("""
-    This app can connect to Model Context Protocol (MCP) servers to extend its capabilities.
-
-    For example, connecting to a Kokoro MCP server allows for text-to-speech conversion.
-
-    To use this feature:
-    1. Enter the MCP server URL
-    2. Test the connection
-    3. Enable the desired features (e.g., TTS)
-    4. Chat normally with the AI
-
-    Note: TTS functionality requires an active connection to a Kokoro MCP server.
-    """)
-
-    # Chat history state
-    chat_history = gr.State([])
-
-    # Connect the test connection button
-    test_connection_btn.click(
-        fn=test_mcp_connection,
-        inputs=[mcp_server_url],
-        outputs=[connection_status]
-    )
-
-    # Function to filter models
-    def filter_models(search_term):
-        print(f"Filtering models with search term: {search_term}")
-        filtered = [m for m in models_list if search_term.lower() in m.lower()]
-        print(f"Filtered models: {filtered}")
-        return gr.update(choices=filtered)
-
-    # Function to set custom model from radio
-    def set_custom_model_from_radio(selected):
-        print(f"Featured model selected: {selected}")
-        return selected
-
-    # Function for the chat interface
-    def user(user_message, history):
-        # Debug logging for troubleshooting
-        print(f"User message received: {user_message}")
-
-        # Skip if message is empty (no text and no files)
-        if not user_message or (not user_message.get("text") and not user_message.get("files")):
-            print("Empty message, skipping")
-            return history
-
-        # Prepare multimodal message format
-        text_content = user_message.get("text", "").strip()
-        files = user_message.get("files", [])
-
-        print(f"Text content: {text_content}")
-        print(f"Files: {files}")
-
-        # If both text and files are empty, skip
-        if not text_content and not files:
-            print("No content to display")
-            return history
-
-        # Add message with images to history
-        if files and len(files) > 0:
-            # Add text message first if it exists
-            if text_content:
-                # Add a separate text message
-                print(f"Adding text message: {text_content}")
-                history.append([text_content, None])
-
-            # Then add each image file separately
-            for file_path in files:
-                if file_path and isinstance(file_path, str):
-                    print(f"Adding image: {file_path}")
-                    # Add image as a separate message with no text
-                    history.append([f"![Image]({file_path})", None])
-
-            return history
-        else:
-            # For text-only messages
-            print(f"Adding text-only message: {text_content}")
-            history.append([text_content, None])
-            return history
-
-    # Define bot response function
-    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model, mcp_url, tts_on, tts_spd):
-        # Check if history is valid
-        if not history or len(history) == 0:
-            print("No history to process")
-            return history
-
-        # Get the most recent message and detect if it's an image
-        user_message = history[-1][0]
-        print(f"Processing user message: {user_message}")
-
-        is_image = False
-        image_path = None
-        text_content = user_message

-        # Check if the message is an image in markdown format
-        if isinstance(user_message, str) and user_message.startswith("![Image]("):
-            is_image = True
-            # Extract image path from markdown format ![Image](path)
-            image_path = user_message.replace("![Image](", "").replace(")", "")
-            print(f"Image detected: {image_path}")
-            text_content = ""  # No text for image-only messages

-
-
-
-            # Use the previous message as context if it's text
-            prev_message = history[-2][0]
-            if isinstance(prev_message, str) and not prev_message.startswith("![Image]("):
-                text_context = prev_message
-                print(f"Using text context from previous message: {text_context}")

-        # Process
-

-
-
-
-
-
-
-                history[:-1],  # Previous history
-                system_msg,
-                max_tokens,
-                temperature,
-                top_p,
-                freq_penalty,
-                seed,
-                provider,
-                api_key,
-                custom_model,
-                search_term,
-                selected_model,
-                mcp_url,
-                tts_on,
-                tts_spd
-            ):
-                history[-1][1] = response
-                yield history
        else:
-
-            for response in respond(
-                text_content,  # Text message
-                None,  # No image
-                history[:-1],  # Previous history
-                system_msg,
-                max_tokens,
-                temperature,
-                top_p,
-                freq_penalty,
-                seed,
-                provider,
-                api_key,
-                custom_model,
-                search_term,
-                selected_model,
-                mcp_url,
-                tts_on,
-                tts_spd
-            ):
-                history[-1][1] = response
-                yield history
-
-    # Event handlers - only using the MultimodalTextbox's built-in submit functionality
-    msg.submit(
-        user,
-        [msg, chatbot],
-        [chatbot],
-        queue=False
-    ).then(
-        bot,
-        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
-         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
-         model_search_box, featured_model_radio, mcp_server_url, tts_enabled, tts_speed],
-        [chatbot]
-    ).then(
-        lambda: {"text": "", "files": []},  # Clear inputs after submission
-        None,
-        [msg]
-    )
-
-    # Connect the model filter to update the radio choices
-    model_search_box.change(
-        fn=filter_models,
-        inputs=model_search_box,
-        outputs=featured_model_radio
-    )
-    print("Model search box change event linked.")
-
-    # Connect the featured model radio to update the custom model box
-    featured_model_radio.change(
-        fn=set_custom_model_from_radio,
-        inputs=featured_model_radio,
-        outputs=custom_model_box
-    )
-    print("Featured model radio button change event linked.")

-
-    byok_textbox.change(
-        fn=validate_provider,
-        inputs=[byok_textbox, provider_radio],
-        outputs=provider_radio
-    )
-    print("BYOK textbox change event linked.")
-
-    # Also validate provider when the radio changes to ensure consistency
-    provider_radio.change(
-        fn=validate_provider,
-        inputs=[byok_textbox, provider_radio],
-        outputs=provider_radio
-    )
-    print("Provider radio button change event linked.")
-
-    print("Gradio interface initialized.")
-
-if __name__ == "__main__":
-    print("Launching the demo application.")
-    demo.launch(show_api=True)

 import os
 import json
 import requests
+import logging
+from typing import Dict, List, Optional, Any, Union

+# Setup logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+logger = logging.getLogger(__name__)

+class MCPClient:
+    """
+    Client for interacting with MCP (Model Context Protocol) servers.
+    Implements a subset of the MCP protocol sufficient for TTS and other basic tools.
+    """

+    def __init__(self, server_url: str):
+        """
+        Initialize an MCP client for a specific server URL

+        Args:
+            server_url: The URL of the MCP server to connect to
+        """
+        self.server_url = server_url
+        self.session_id = None
+        logger.info(f"Initialized MCP Client for server: {server_url}")

+    def connect(self) -> bool:
+        """
+        Establish connection with the MCP server
+
+        Returns:
+            bool: True if connection was successful, False otherwise
+        """
        try:
+            # For a real MCP implementation, this would use the MCP initialization protocol
+            # This is a simplified version for demonstration purposes
+            response = requests.post(
+                f"{self.server_url}/connect",
+                json={"client": "Serverless-TextGen-Hub", "version": "1.0.0"},
+                timeout=10
+            )

+            if response.status_code == 200:
+                result = response.json()
+                self.session_id = result.get("session_id")
+                logger.info(f"Connected to MCP server with session ID: {self.session_id}")
+                return True
+            else:
+                logger.error(f"Failed to connect to MCP server: {response.status_code} - {response.text}")
+                return False
        except Exception as e:
+            logger.error(f"Error connecting to MCP server: {e}")
            return False

+    def list_tools(self) -> List[Dict]:
+        """
+        List available tools from the MCP server

+        Returns:
+            List[Dict]: List of tool definitions from the server
+        """
+        if not self.session_id:
+            if not self.connect():
+                return []

        try:
+            # In a real MCP implementation, this would use the tools/list method
+            response = requests.get(
+                f"{self.server_url}/tools/list",
+                headers={"X-MCP-Session": self.session_id},
+                timeout=10
+            )
+
+            if response.status_code == 200:
+                result = response.json()
+                tools = result.get("tools", [])
+                logger.info(f"Retrieved {len(tools)} tools from MCP server")
+                return tools
+            else:
+                logger.error(f"Failed to list tools: {response.status_code} - {response.text}")
+                return []
        except Exception as e:
+            logger.error(f"Error listing tools: {e}")
+            return []
+
+    def call_tool(self, tool_name: str, args: Dict) -> Dict:
+        """
+        Call a tool on the MCP server
+
+        Args:
+            tool_name: Name of the tool to call
+            args: Arguments to pass to the tool
+
+        Returns:
+            Dict: Result of the tool call
+        """
+        if not self.session_id:
+            if not self.connect():
+                return {"error": "Not connected to MCP server"}
+
+        try:
+            # In a real MCP implementation, this would use the tools/call method
+            response = requests.post(
+                f"{self.server_url}/tools/call",
+                headers={"X-MCP-Session": self.session_id},
+                json={"name": tool_name, "arguments": args},
+                timeout=30  # Longer timeout for tool calls
+            )
+
+            if response.status_code == 200:
+                result = response.json()
+                logger.info(f"Successfully called tool {tool_name}")
+                return result
+            else:
+                error_msg = f"Failed to call tool {tool_name}: {response.status_code} - {response.text}"
+                logger.error(error_msg)
+                return {"error": error_msg}
+        except Exception as e:
+            error_msg = f"Error calling tool {tool_name}: {e}"
+            logger.error(error_msg)
+            return {"error": error_msg}

    def close(self):
+        """Clean up the client connection"""
+        if self.session_id:
            try:
+                # For a real MCP implementation, this would use the shutdown method
+                requests.post(
+                    f"{self.server_url}/disconnect",
+                    headers={"X-MCP-Session": self.session_id},
+                    timeout=5
+                )
+                logger.info(f"Disconnected from MCP server")
            except Exception as e:
+                logger.error(f"Error disconnecting from MCP server: {e}")
+            finally:
+                self.session_id = None

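For reference, a minimal sketch of how this simplified client is driven end to end; the server URL and tool name below are hypothetical, chosen only for illustration:

    # Hypothetical usage of the MCPClient above
    client = MCPClient("https://example-mcp-server.hf.space")
    if client.connect():
        tools = client.list_tools()
        print([t["name"] for t in tools])
        # "text_to_audio" is an assumed tool name; use one reported by tools/list
        result = client.call_tool("text_to_audio", {"text": "Hello", "speed": 1.0})
        print(result.get("error", "tool call succeeded"))
    client.close()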
+
+def get_mcp_servers() -> Dict[str, Dict[str, str]]:
+    """
+    Load MCP server configuration from environment variable

    Returns:
+        Dict[str, Dict[str, str]]: Map of server names to server configurations
    """
    try:
+        mcp_config = os.getenv("MCP_CONFIG")
+        if mcp_config:
+            servers = json.loads(mcp_config)
+            logger.info(f"Loaded {len(servers)} MCP servers from configuration")
+            return servers
+        else:
+            logger.warning("No MCP configuration found")
+            return {}
    except Exception as e:
+        logger.error(f"Error loading MCP configuration: {e}")
+        return {}
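MCP_CONFIG is read as a JSON map of server names to configurations; the only field the rest of this file relies on is "url", so a minimal (illustrative) value looks like:

    # Hypothetical MCP_CONFIG value, e.g. set as a Space secret
    # export MCP_CONFIG='{"kokoro": {"url": "https://example-kokoro-mcp.hf.space"}}'
    import os, json
    os.environ["MCP_CONFIG"] = json.dumps(
        {"kokoro": {"url": "https://example-kokoro-mcp.hf.space"}}
    )
    print(get_mcp_servers())  # {'kokoro': {'url': 'https://example-kokoro-mcp.hf.space'}}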
+def text_to_speech(text: str, server_name: str = None) -> Optional[str]:
+    """
+    Convert text to speech using an MCP TTS server

+    Args:
+        text: The text to convert to speech
+        server_name: Name of the MCP server to use for TTS

+    Returns:
+        Optional[str]: Data URL containing the audio, or None if conversion failed
+    """
+    servers = get_mcp_servers()

+    if not server_name or server_name not in servers:
+        logger.warning(f"TTS server {server_name} not configured")
+        return None

+    server_url = servers[server_name].get("url")
+    if not server_url:
+        logger.warning(f"No URL found for TTS server {server_name}")
+        return None

+    client = MCPClient(server_url)

+    try:
+        # List available tools to find the TTS tool
+        tools = client.list_tools()
+
+        # Find a TTS tool - look for common TTS tool names
+        tts_tool = next(
+            (t for t in tools if any(
+                name in t["name"].lower()
+                for name in ["text_to_audio", "tts", "text_to_speech", "speech"]
+            )),
+            None
        )

+        if not tts_tool:
+            logger.warning(f"No TTS tool found on server {server_name}")
+            return None

+        # Call the TTS tool
+        result = client.call_tool(tts_tool["name"], {"text": text, "speed": 1.0})

+        if "error" in result:
+            logger.error(f"TTS error: {result['error']}")
+            return None

+        # Process the result - usually a base64 encoded WAV
+        audio_data = result.get("audio") or result.get("content") or result.get("result")

+        if isinstance(audio_data, str) and audio_data.startswith("data:audio"):
+            # Already a data URL
+            return audio_data
+        elif isinstance(audio_data, str):
+            # Assume it's base64 encoded
+            return f"data:audio/wav;base64,{audio_data}"
        else:
+            logger.error(f"Unexpected TTS result format: {type(audio_data)}")
+            return None

+    finally:
+        client.close()
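Putting it together, a caller only needs a server name from MCP_CONFIG; a minimal sketch (the "kokoro" name and the text are illustrative):

    # Hypothetical end-to-end TTS call; assumes MCP_CONFIG defines "kokoro"
    audio_url = text_to_speech("Hello from Serverless-TextGen-Hub", server_name="kokoro")
    if audio_url:
        # The returned data URL can be embedded directly in a Gradio HTML component
        html_snippet = f'<audio controls src="{audio_url}"></audio>'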