Update app.py
app.py
CHANGED
@@ -5,47 +5,44 @@ import json
 import base64
 from PIL import Image
 import io
-import requests
-from smolagents.mcp_client import MCPClient

 ACCESS_TOKEN = os.getenv("HF_TOKEN")
-[…]
-    print("Access token loaded from HF_TOKEN environment variable.")
-else:
-    print("Warning: HF_TOKEN environment variable not set. Some operations might fail.")

 # Function to encode image to base64
-def encode_image([…]
-    if not […]
-        print("No image path […]
         return None

     try:
-        […]
-        if not os.path.exists(image_path_or_pil):
-            print(f"Error: Image file not found at {image_path_or_pil}")
-            return None
-        image = Image.open(image_path_or_pil)
         else:
-            […]

         if image.mode == 'RGBA':
             image = image.convert('RGB')

         buffered = io.BytesIO()
-        image.save(buffered, format="JPEG")
         img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-        print("Image encoded successfully[…]
         return img_str
     except Exception as e:
         print(f"Error encoding image: {e}")
-        import traceback
-        traceback.print_exc()
         return None

 # Dictionary to store active MCP connections
@@ -54,606 +51,748 @@ mcp_connections = {}
 def connect_to_mcp_server(server_url, server_name=None):
     """Connect to an MCP server and return available tools"""
     if not server_url:
-        return None, "No server URL provided[…]

     try:
-        […]
-        client = MCPClient({"url": server_url})
-        […]
         mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}

-        […]
-        return name, f"Successfully connected to '{name}' ({server_url}). Found {len(tools)} tool(s)."
     except Exception as e:
-        print(f"Error connecting to MCP server[…]
-        traceback.print_exc()
-        return None, f"Error connecting to MCP server '{server_url}': {str(e)}"

 def list_mcp_tools(server_name):
     """List available tools for a connected MCP server"""
     if server_name not in mcp_connections:
-        return "Server not connected[…]

     tools = mcp_connections[server_name]["tools"]
     tool_info = []
     for tool in tools:
-        tool_info.append(f"-[…]

     if not tool_info:
-        return "No tools available for this server[…]

     return "\n".join(tool_info)

 def call_mcp_tool(server_name, tool_name, **kwargs):
-    """Call a specific tool from an MCP server[…]
     if server_name not in mcp_connections:
-        return {"[…]

-    […]
     try:
-        […]
-        for block in tool_result.content:
-            if hasattr(block, 'uri') and isinstance(block.uri, str) and block.uri.startswith('data:audio/'):
-                audio_block_found = {
-                    "type": "audio",
-                    "data_uri": block.uri,
-                    "name": getattr(block, 'name', 'audio_output.wav')
-                }
-                break  # Prioritize first audio block
-            elif hasattr(block, 'text') and block.text is not None:
-                text_parts.append(str(block.text))
-            elif hasattr(block, 'json_data') and block.json_data is not None:
-                try:
-                    json_parts.append(json.dumps(block.json_data, indent=2))
-                except TypeError:
-                    json_parts.append(str(block.json_data))  # Fallback
-            else:
-                other_parts.append(str(block))
-
-        if audio_block_found:
-            print(f"MCP tool returned audio: {audio_block_found['name']}")
-            return audio_block_found
-        elif text_parts:
-            full_text = "\n".join(text_parts)
-            print(f"MCP tool returned text: {full_text[:100]}...")
-            return {"type": "text", "value": full_text}
-        elif json_parts:
-            full_json_str = "\n".join(json_parts)
-            print(f"MCP tool returned JSON string.")
-            return {"type": "json_string", "value": full_json_str}  # Treat as string for display
-        elif other_parts:
-            print(f"MCP tool returned other content types.")
-            return {"type": "text", "value": "\n".join(other_parts)}
         else:
-            […]
-            print("MCP tool executed, but ToolResult or its content was empty.")
-            return {"type": "text", "value": "Tool executed, but returned no content."}
     except Exception as e:
-        print(f"Error calling MCP tool[…]
         import traceback
         traceback.print_exc()
-        return {"[…]

-def analyze_message_for_tool_call(message, active_mcp_servers,[…]
     """Analyze a message to determine if an MCP tool should be called"""
-    if not message or not message.strip()[…]
         return None, None

-    […]
-    for […]
-        if […]
-            for […]

-    if not […]
-        print("No active MCP tools found for analysis.")
         return None, None

-    […]
-    You […]
-    […]
-    Response: NO_TOOL_NEEDED
-
-    User's current message is: "{message}"
-    Now, provide your decision:"""

     try:
-        […]
-            model=llm_model_to_use,
             messages=[
-                {"role": "user", "content":[…]
             ],
-            temperature=0.1,
-            max_tokens=300
-            stop=["\n\n"]  # Stop early if LLM adds extra verbiage
         )

-        […]
-        print(f"[…]

-        if […]
-            print("LLM determined no tool needed.")
             return None, None

-        # Try to extract JSON from the response (handle potential markdown code blocks)
-        if analysis_text.startswith("```json"):
-            analysis_text = analysis_text.replace("```json", "").replace("```", "").strip()
-        elif analysis_text.startswith("```"):
-            analysis_text = analysis_text.replace("```", "").strip()
-
-        json_start = analysis_text.find("{")
-        json_end = analysis_text.rfind("}") + 1
-
-        if json_start == -1 or json_end <= json_start:
-            print(f"Could not find valid JSON object in LLM response: '{analysis_text}'")
-            return None, None
-
-        json_str = analysis_text[json_start:json_end]
         try:
-            […]
-            if "server_name" in […]
-            […]
-                "parameters": tool_call_data.get("parameters", {})
             }
         else:
-            print(f"LLM response[…]
             return None, None
-        except json.JSONDecodeError[…]
-            print(f"Failed to parse tool call JSON from LLM[…]
             return None, None

     except Exception as e:
-        print(f"Error[…]
-        import traceback
-        traceback.print_exc()
         return None, None

 def respond(
-    […]
 ):
-    print(f"[…]
-    […]

-    if […]
-        command_parts = message_text_input.split(" ", 3)
     if len(command_parts) < 3:
         yield "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments_json]"
         return

-    _,[…]
-    invoked_tool_display_name = tool_name_cmd
-    args_json_str = "{}" if len(command_parts) < 4 else command_parts[…]

     try:
-        […]
     except json.JSONDecodeError:
-        yield f"Invalid JSON arguments[…]
         return
-    except Exception as […]
-        yield f"Error[…]
         return

-    detected_server_nl, tool_info_nl = analyze_message_for_tool_call(
-        message_text_input,
-        active_mcp_server_names,
-        llm_client_instance,
-        model_id_for_llm,
-        system_message_prompt
     )

-    if […]

-    # Augment system message with MCP tool info if enabled
-    augmented_sys_msg = system_message_prompt
-    if mcp_is_enabled and active_mcp_server_names:
-        mcp_tool_descriptions_for_llm = []
-        for server_name_iter in active_mcp_server_names:
             if server_name_iter in mcp_connections:
-                […]

-        if […]
-            interaction_advice = ""
-            if mcp_interaction_mode_choice == "Command Mode":
-                interaction_advice = "The user can invoke these tools using '/mcp <server_name> <tool_name> <json_args>'."
-            # For Natural Language mode, the LLM doesn't need explicit instruction in system prompt
-            # as `analyze_message_for_tool_call` handles that part.

-    […]
-        if encoded_hist_img:
-            history_user_llm_content.append({
-                "type": "image_url",
-                "image_url": {"url": f"data:image/jpeg;base64,{encoded_hist_img}"}
-            })
-        if history_user_llm_content:  # Only add if there's actual content
-            messages_for_llm_api.append({"role": "user", "content": history_user_llm_content})

-    […]
-        "max_tokens":[…]
-        […]
     }

-    print(f"Sending request to LLM: Model={model_id_for_llm}, Params={llm_parameters}")
-    streamed_response_text = ""
     try:
-        […]
             stream=True,
-            […]
         )

-        […]
             if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
-                delta[…]
-        […]
-        streamed_response_text += f"\n{error_msg}"  # Append error to existing stream if any
-        yield streamed_response_text

-    print([…]

 # GRADIO UI
-with gr.Blocks(theme="Nymbo/Nymbo_Theme"[…]
-    gr.Markdown("# Serverless TextGen Hub with MCP Client")
     chatbot = gr.Chatbot(
-        label="Chat",
         height=600,
         show_copy_button=True,
-        placeholder="Select a model[…]
-        […]
     )

     with gr.Row():
-        […]
-        placeholder="Type a message or upload images...[…]
         show_label=False,
         container=False,
         scale=12,
-        file_types=["image"],
-        file_count="multiple"
     )
-    # submit_button = gr.Button("Send", variant="primary", scale=1, min_width=100)  # Optional explicit send button

-    […]
     )

     with gr.Row():
         with gr.Column(scale=1):
-            […]
         with gr.Column(scale=1):
-            […]

-    […]
-    provider_radio_ui = gr.Radio(choices=providers_list_ui, value="hf-inference", label="Inference Provider")
-    byok_textbox_ui = gr.Textbox(label="Your Hugging Face API Key (Optional)", placeholder="Enter HF Token if using non-hf-inference providers or private models", type="password")
-    custom_model_id_box = gr.Textbox(label="Custom Model ID (Overrides selection below)", placeholder="e.g., meta-llama/Llama-3-8B-Instruct")

-    […]
-        "meta-llama/[…]
-        "meta-llama/[…]
         "mistralai/Mistral-Nemo-Instruct-2407",
-        "Qwen/Qwen2-[…]
-        […]
-        # Multimodal
-        "Salesforce/[…]
-        "llava-hf/llava-[…]
-        […]
     ]
-    […]
-    gr.Markdown("Tip: For multimodal chat, ensure selected model supports image inputs (e.g., LLaVA, PaliGemma, Kosmos-2).")

-    with gr.Accordion("MCP[…]
     with gr.Row():
-        […]
     )
-    […]

-        current_chat_history, system_prompt, max_tokens, temp, top_p_val, freq_penalty, seed_val,
-        provider_val, api_key_val, custom_model_val, selected_model_val,  # Removed search_term as it's not directly used by respond
-        mcp_enabled_val, active_servers_val, mcp_mode_val
-    ):
-        if not current_chat_history or current_chat_history[-1] is not None:  # If no user message or last message already has bot response
-            yield current_chat_history  # Or simply `return current_chat_history` if not streaming
-            return

-        user_turn_content, _ = current_chat_history[-1]  # Get the latest user turn: (text, [files])
-        message_text, message_files = user_turn_content
-        […]
-            yield current_chat_history

-    # Link UI components to functions
-    msg_textbox.submit(
-        user_interaction,
-        inputs=[msg_textbox, chat_history_state],
-        outputs=[chat_history_state, msg_textbox]  # Update history and clear input
-    ).then(
-        bot_response_generator,
-        inputs=[
-            chat_history_state, system_message_prompt_box, max_tokens_slider_ui, temperature_slider_ui,
-            top_p_slider_ui, frequency_penalty_slider_ui, seed_slider_ui, provider_radio_ui,
-            byok_textbox_ui, custom_model_id_box, featured_model_radio_ui,
-            mcp_enabled_checkbox_ui, active_mcp_servers_dropdown, mcp_interaction_mode_radio
-        ],
-        outputs=[chatbot]  # Stream to chatbot
-    )

-        if not url or not url.strip():
-            return "MCP Server URL cannot be empty.", gr.update(choices=list(mcp_connections.keys()))
-    […]

-    # Model Filtering
-    def filter_featured_models(search_query):
-        if not search_query:
-            return gr.update(choices=featured_models_list_data)
-        filtered = [m for m in featured_models_list_data if search_query.lower() in m.lower()]
-        return gr.update(choices=filtered if filtered else ["No models match your search"])

 if __name__ == "__main__":
-    print("Launching[…]
-    demo.queue().launch([…]

@@ -5,47 +5,44 @@ import json
 import base64
 from PIL import Image
 import io
+import requests
+from smolagents.mcp_client import MCPClient
+from mcp import ToolResult  # For type hinting, good practice
+from mcp.common.content_block import ValueContentBlock  # To access the actual tool return value
+import numpy as np  # For handling audio array
+import soundfile as sf  # For converting audio array to WAV

 ACCESS_TOKEN = os.getenv("HF_TOKEN")
+print("Access token loaded.")

 # Function to encode image to base64
+def encode_image(image_path):
+    if not image_path:
+        print("No image path provided")
         return None

     try:
+        print(f"Encoding image from path: {image_path}")
+
+        # If it's already a PIL Image
+        if isinstance(image_path, Image.Image):
+            image = image_path
         else:
+            # Try to open the image file
+            image = Image.open(image_path)

+        # Convert to RGB if image has an alpha channel (RGBA)
         if image.mode == 'RGBA':
             image = image.convert('RGB')

+        # Encode to base64
         buffered = io.BytesIO()
+        image.save(buffered, format="JPEG")
         img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+        print("Image encoded successfully")
         return img_str
     except Exception as e:
         print(f"Error encoding image: {e}")
         return None

 # Dictionary to store active MCP connections
@@ -54,606 +51,748 @@ mcp_connections = {}
 def connect_to_mcp_server(server_url, server_name=None):
     """Connect to an MCP server and return available tools"""
     if not server_url:
+        return None, "No server URL provided"

     try:
+        # Create an MCP client and connect to the server
+        client = MCPClient({"url": server_url})
+        # Get available tools
+        tools = client.get_tools()

+        # Store the connection for later use
+        name = server_name or f"Server_{len(mcp_connections)}"
         mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}

+        return name, f"Successfully connected to {name} with {len(tools)} available tools"
     except Exception as e:
+        print(f"Error connecting to MCP server: {e}")
+        return None, f"Error connecting to MCP server: {str(e)}"

 def list_mcp_tools(server_name):
     """List available tools for a connected MCP server"""
     if server_name not in mcp_connections:
+        return "Server not connected"

     tools = mcp_connections[server_name]["tools"]
     tool_info = []
     for tool in tools:
+        tool_info.append(f"- {tool.name}: {tool.description}")

     if not tool_info:
+        return "No tools available for this server"

     return "\n".join(tool_info)

 def call_mcp_tool(server_name, tool_name, **kwargs):
+    """Call a specific tool from an MCP server"""
     if server_name not in mcp_connections:
+        return {"error": f"Server '{server_name}' not connected"}  # Return dict for consistency

+    client_data = mcp_connections[server_name]
+    client = client_data["client"]
+    server_tools = client_data["tools"]

+    # Find the requested tool
+    tool = next((t for t in server_tools if t.name == tool_name), None)
+    if not tool:
+        return {"error": f"Tool '{tool_name}' not found on server '{server_name}'"}
+
     try:
+        # Call the tool with provided arguments
+        mcp_tool_result: ToolResult = client.call_tool(tool_name=tool_name, arguments=kwargs)
+
+        actual_result = None
+        if mcp_tool_result.content:
+            content_block = mcp_tool_result.content[0]
+            if isinstance(content_block, ValueContentBlock):
+                actual_result = content_block.value
+            elif hasattr(content_block, 'text'):  # e.g., TextContentBlock
+                actual_result = content_block.text
             else:
+                actual_result = str(content_block)  # Fallback
+        else:  # No content
+            return {"warning": "Tool returned no content."}
+
+        # Special handling for audio result (e.g., from Kokoro TTS)
+        # This checks if the result is a tuple (sample_rate, audio_data_list)
+        # Gradio MCP server serializes numpy arrays to lists.
+        if (server_name == "kokoroTTS" and tool_name == "text_to_audio" and
+            isinstance(actual_result, tuple) and len(actual_result) == 2 and
+            isinstance(actual_result[0], int) and
+            (isinstance(actual_result[1], list) or isinstance(actual_result[1], np.ndarray))):
+
+            print(f"Received audio data from {server_name}.{tool_name}")
+            sample_rate, audio_data_list = actual_result
+
+            # Convert list to numpy array if necessary
+            audio_data = np.array(audio_data_list)
+
+            # Ensure correct dtype for soundfile (float32 is common, or int16)
+            # Kokoro returns float, likely in [-1, 1] range.
+            if audio_data.dtype != np.float32 and audio_data.dtype != np.int16:
+                # Attempt to normalize if it looks like it's not in [-1, 1] for float
+                if np.issubdtype(audio_data.dtype, np.floating) and (np.min(audio_data) < -1.1 or np.max(audio_data) > 1.1):
+                    print(f"Warning: Audio data for {server_name}.{tool_name} might not be normalized. Min: {np.min(audio_data)}, Max: {np.max(audio_data)}")
+                audio_data = audio_data.astype(np.float32)
+
+            wav_io = io.BytesIO()
+            sf.write(wav_io, audio_data, sample_rate, format='WAV')
+            wav_io.seek(0)
+
+            wav_b64 = base64.b64encode(wav_io.read()).decode('utf-8')
+
+            return {
+                "type": "audio_b64",
+                "data": wav_b64,
+                "message": f"Audio generated by {server_name}.{tool_name}"
+            }
+
+        # Handle other types of results
+        if isinstance(actual_result, dict):
+            return actual_result
+        elif isinstance(actual_result, str):
+            try:  # If string is JSON, parse to dict
+                return json.loads(actual_result)
+            except json.JSONDecodeError:
+                return {"text": actual_result}  # Wrap raw string
+        else:
+            return {"value": str(actual_result)}  # Fallback for other primitive types

     except Exception as e:
+        print(f"Error calling MCP tool: {e}")
         import traceback
         traceback.print_exc()
+        return {"error": f"Error calling MCP tool: {str(e)}"}

+def analyze_message_for_tool_call(message, active_mcp_servers, client, model_to_use, system_message):
     """Analyze a message to determine if an MCP tool should be called"""
+    if not message or not message.strip():
         return None, None

+    tool_info = []
+    for server_name in active_mcp_servers:
+        if server_name in mcp_connections:
+            server_tools_raw = list_mcp_tools(server_name)  # This returns a string
+            if server_tools_raw != "Server not connected" and server_tools_raw != "No tools available for this server":
+                # Parse the string from list_mcp_tools
+                for line in server_tools_raw.split("\n"):
+                    if line.startswith("- "):
+                        parts = line[2:].split(":", 1)
+                        if len(parts) == 2:
+                            tool_info.append({
+                                "server_name": server_name,
+                                "tool_name": parts[0].strip(),
+                                "description": parts[1].strip()
+                            })

+    if not tool_info:
         return None, None

+    tools_desc = []
+    for info in tool_info:
+        tools_desc.append(f"{info['server_name']}.{info['tool_name']}: {info['description']}")

+    tools_string = "\n".join(tools_desc)
+
+    analysis_system_prompt = f"""You are an assistant that helps determine if a user message requires using an external tool.
+Available tools:
+{tools_string}
+
+Your job is to:
+1. Analyze the user's message.
+2. Determine if they're asking to use one of the tools.
+3. If yes, respond ONLY with a JSON object with "server_name", "tool_name", and "parameters".
+4. If no, respond ONLY with the exact string "NO_TOOL_NEEDED".
+
+Example 1 (User wants TTS):
+User: "Please turn this text into speech: Hello world"
+Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio", "parameters": {{"text": "Hello world", "speed": 1.0}}}}
+
+Example 2 (User wants TTS with different server name):
+User: "Use mySpeechTool to say 'good morning'"
+Response: {{"server_name": "mySpeechTool", "tool_name": "text_to_audio", "parameters": {{"text": "good morning"}}}}
+
+Example 3 (User does not want a tool):
+User: "What is the capital of France?"
+Response: NO_TOOL_NEEDED"""

     try:
+        response = client.chat_completion(
+            model=model_to_use,
             messages=[
+                {"role": "system", "content": analysis_system_prompt},
+                {"role": "user", "content": message}
             ],
+            temperature=0.1,
+            max_tokens=300
         )

+        analysis = response.choices[0].message.content.strip()
+        print(f"Tool analysis LLM response: '{analysis}'")

+        if analysis == "NO_TOOL_NEEDED":
             return None, None

         try:
+            tool_call = json.loads(analysis)
+            if isinstance(tool_call, dict) and "server_name" in tool_call and "tool_name" in tool_call:
+                return tool_call.get("server_name"), {
+                    "tool_name": tool_call.get("tool_name"),
+                    "parameters": tool_call.get("parameters", {})
                 }
             else:
+                print(f"LLM response for tool call was not a valid JSON with required keys: {analysis}")
                 return None, None
+        except json.JSONDecodeError:
+            print(f"Failed to parse tool call JSON from LLM: {analysis}")
             return None, None

     except Exception as e:
+        print(f"Error analyzing message for tool calls: {str(e)}")
         return None, None

 def respond(
+    message,
+    image_files,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+    frequency_penalty,
+    seed,
+    provider,
+    custom_api_key,
+    custom_model,
+    model_search_term,
+    selected_model,
+    mcp_enabled=False,
+    active_mcp_servers=None,
+    mcp_interaction_mode="Natural Language"
 ):
+    print(f"Received message: {message}")
+    print(f"Received {len(image_files) if image_files else 0} images")
+    # print(f"History: {history}")  # Can be verbose
+    print(f"System message: {system_message}")
+    print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
+    print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
+    print(f"Selected provider: {provider}")
+    print(f"Custom API Key provided: {bool(custom_api_key.strip())}")
+    print(f"Selected model (custom_model): {custom_model}")
+    print(f"Model search term: {model_search_term}")
+    print(f"Selected model from radio: {selected_model}")
+    print(f"MCP enabled: {mcp_enabled}")
+    print(f"Active MCP servers: {active_mcp_servers}")
+    print(f"MCP interaction mode: {mcp_interaction_mode}")
+
+    token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN

+    if custom_api_key.strip() != "":
+        print("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
+    else:
+        print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")

+    client = InferenceClient(token=token_to_use, provider=provider)
+    print(f"Hugging Face Inference Client initialized with {provider} provider.")
+
+    if seed == -1:
+        seed = None
+
+    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
+    print(f"Model selected for inference: {model_to_use}")
+
+    if mcp_enabled and message:
+        if message.startswith("/mcp"):
+            command_parts = message.split(" ", 3)
             if len(command_parts) < 3:
                 yield "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments_json]"
                 return

+            _, server_name, tool_name = command_parts[:3]
+            args_json = "{}" if len(command_parts) < 4 else command_parts[3]

             try:
+                args_dict = json.loads(args_json)
+                result = call_mcp_tool(server_name, tool_name, **args_dict)
+
+                if isinstance(result, dict) and result.get("type") == "audio_b64":
+                    yield f"<audio controls src=\"data:audio/wav;base64,{result.get('data')}\"></audio>"
+                elif isinstance(result, dict) and "error" in result:
+                    yield f"Error: {result.get('error')}"
+                elif isinstance(result, dict):
+                    yield json.dumps(result, indent=2)
+                else:
+                    yield str(result)
+                return
             except json.JSONDecodeError:
+                yield f"Invalid JSON arguments: {args_json}"
                 return
+            except Exception as e:
+                yield f"Error executing MCP command: {str(e)}"
                 return
+        elif mcp_interaction_mode == "Natural Language" and active_mcp_servers:
+            print("Attempting natural language tool call detection...")
+            server_name, tool_info = analyze_message_for_tool_call(
+                message, active_mcp_servers, client, model_to_use, system_message
             )

+            if server_name and tool_info and tool_info.get("tool_name"):
+                try:
+                    print(f"Calling tool via natural language: {server_name}.{tool_info['tool_name']} with parameters: {tool_info['parameters']}")
+                    result = call_mcp_tool(server_name, tool_info['tool_name'], **tool_info.get('parameters', {}))
+
+                    response_message = f"I used the **{tool_info['tool_name']}** tool from **{server_name}**."
+                    if isinstance(result, dict) and result.get("message"):
+                        response_message += f" ({result.get('message')})"
+                    response_message += "\n\n"
+
+                    if isinstance(result, dict) and result.get("type") == "audio_b64":
+                        audio_html = f"<audio controls src=\"data:audio/wav;base64,{result.get('data')}\"></audio>"
+                        yield response_message + audio_html
+                    elif isinstance(result, dict) and "error" in result:
+                        result_str = f"Tool Error: {result.get('error')}"
+                        yield response_message + result_str
+                    elif isinstance(result, dict):
+                        result_str = f"Result:\n```json\n{json.dumps(result, indent=2)}\n```"
+                        yield response_message + result_str
+                    else:
+                        result_str = f"Result:\n{str(result)}"
+                        yield response_message + result_str
+                    return
+                except Exception as e:
+                    print(f"Error executing MCP tool via natural language: {str(e)}")
+                    # Fall through to normal LLM response if the tool call fails here
+            else:
+                print("No tool call detected by natural language analysis or tool_info incomplete.")

+    user_content_parts = []
+    if message and message.strip():
+        user_content_parts.append({"type": "text", "text": message})

+    if image_files and len(image_files) > 0:
+        for img_path in image_files:
+            if img_path:
+                try:
+                    encoded_image = encode_image(img_path)
+                    if encoded_image:
+                        user_content_parts.append({
+                            "type": "image_url",
+                            "image_url": {"url": f"data:image/jpeg;base64,{encoded_image}"}
+                        })
+                except Exception as e:
+                    print(f"Error encoding image {img_path}: {e}")

+    if not user_content_parts:  # If the message was only an /mcp command that was already handled
+        print("No further content for LLM after MCP command processing.")
+        # The MCP command paths above yield and return, so this is mainly a safety net:
+        if message and message.startswith("/mcp"):
+            return  # Don't fall through to the LLM after a command that should have yielded.
+
+    final_user_content = user_content_parts if len(user_content_parts) > 1 else (user_content_parts[0] if user_content_parts else "")
+
+    augmented_system_message = system_message
+    if mcp_enabled and active_mcp_servers:
+        tool_list_for_prompt = []
+        for server_name_iter in active_mcp_servers:
             if server_name_iter in mcp_connections:
+                server_tools_str = list_mcp_tools(server_name_iter)
+                if server_tools_str and "not connected" not in server_tools_str and "No tools available" not in server_tools_str:
+                    tool_list_for_prompt.append(f"From server '{server_name_iter}':\n{server_tools_str}")

+        if tool_list_for_prompt:
+            mcp_tools_description = "\n\n".join(tool_list_for_prompt)

+            if mcp_interaction_mode == "Command Mode":
+                augmented_system_message += f"\n\nYou have access to the following MCP tools. To use them, type a command in the format: /mcp <server_name> <tool_name> <arguments_json>\nTools:\n{mcp_tools_description}"
+            else:  # Natural Language
+                augmented_system_message += f"\n\nYou have access to the following MCP tools. You can ask to use them in natural language, and I will try to detect when a tool is needed. If I miss it, you can try being more explicit about the tool name.\nTools:\n{mcp_tools_description}"

+    messages_for_api = [{"role": "system", "content": augmented_system_message}]
+    print("Initial messages array constructed.")
+
+    for val in history:
+        past_user_msg, past_assistant_msg = val

+        # Handle past user messages (could be text or multimodal)
+        if past_user_msg:
+            if isinstance(past_user_msg, list):  # Already multimodal
+                messages_for_api.append({"role": "user", "content": past_user_msg})
+            elif isinstance(past_user_msg, str):  # Text only
+                messages_for_api.append({"role": "user", "content": past_user_msg})

+        if past_assistant_msg:
+            messages_for_api.append({"role": "assistant", "content": past_assistant_msg})

+    if final_user_content:  # Add current user message if it exists
+        messages_for_api.append({"role": "user", "content": final_user_content})
+
+    print(f"Latest user message appended (content type: {type(final_user_content)})")
+    # print(f"Full messages_for_api: {json.dumps(messages_for_api, indent=2)}")  # Can be very verbose

+    llm_response_text = ""
+    print(f"Sending request to {provider} provider for model {model_to_use}.")

+    parameters = {
+        "max_tokens": max_tokens,
+        "temperature": temperature,
+        "top_p": top_p,
+        "frequency_penalty": frequency_penalty,
     }
+
+    if seed is not None:
+        parameters["seed"] = seed

     try:
+        stream = client.chat_completion(
+            model=model_to_use,
+            messages=messages_for_api,
             stream=True,
+            **parameters
         )

+        # print("Received tokens: ", end="", flush=True)  # Can be too noisy
+
+        for chunk in stream:
             if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
+                if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
+                    token_text = chunk.choices[0].delta.content
+                    if token_text:
+                        # print(token_text, end="", flush=True)  # Can be too noisy
+                        llm_response_text += token_text
+                        yield llm_response_text
+
+        # print()  # Newline after tokens
+    except Exception as e:
+        print(f"Error during LLM inference: {e}")
+        llm_response_text += f"\nLLM Error: {str(e)}"
+        yield llm_response_text

+    print("Completed LLM response generation.")

 # GRADIO UI
+with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
     chatbot = gr.Chatbot(
         height=600,
         show_copy_button=True,
+        placeholder="Select a model and begin chatting. Supports multiple inference providers, multimodal inputs, and MCP tools.",
+        layout="panel",
+        show_label=False,
+        render=False  # Delay rendering
     )
+    print("Chatbot interface created.")

     with gr.Row():
+        msg = gr.MultimodalTextbox(
+            placeholder="Type a message or upload images...",
             show_label=False,
             container=False,
             scale=12,
+            file_types=["image"],
+            file_count="multiple",
+            sources=["upload"],
+            render=False  # Delay rendering
         )

+    chatbot.render()
+    msg.render()
+
+    with gr.Accordion("Settings", open=False):
+        system_message_box = gr.Textbox(
+            value="You are a helpful AI assistant that can understand images and text. If the user asks you to use a tool, try your best.",
+            placeholder="You are a helpful assistant.",
+            label="System Prompt"
+        )

         with gr.Row():
             with gr.Column(scale=1):
+                max_tokens_slider = gr.Slider(minimum=1, maximum=8192, value=1024, step=1, label="Max tokens")
+                temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.01, label="Temperature")
+                top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-P")
             with gr.Column(scale=1):
+                frequency_penalty_slider = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.1, label="Frequency Penalty")
+                seed_slider = gr.Slider(minimum=-1, maximum=65535, value=-1, step=1, label="Seed (-1 for random)")

+        providers_list = ["hf-inference", "cerebras", "together", "sambanova", "novita", "cohere", "fireworks-ai", "hyperbolic", "nebius"]
+        provider_radio = gr.Radio(choices=providers_list, value="hf-inference", label="Inference Provider")
+        byok_textbox = gr.Textbox(value="", label="BYOK (Bring Your Own Key)", info="Enter a custom Hugging Face API key here. If empty, only 'hf-inference' provider can be used with the shared token.", placeholder="Enter your Hugging Face API token", type="password")
+        custom_model_box = gr.Textbox(value="", label="Custom Model ID", info="(Optional) Provide a custom Hugging Face model ID. Overrides selected featured model.", placeholder="meta-llama/Llama-3.1-70B-Instruct")

+        model_search_box = gr.Textbox(label="Filter Featured Models", placeholder="Search for a featured model...", lines=1)

+        models_list = [
+            "meta-llama/Llama-3.1-405B-Instruct-FP8",  # Large model, might be slow/expensive
+            "meta-llama/Llama-3.1-70B-Instruct",
+            "meta-llama/Llama-3.1-8B-Instruct",
             "mistralai/Mistral-Nemo-Instruct-2407",
+            "Qwen/Qwen2-72B-Instruct",
+            "Qwen/Qwen2-57B-A14B-Instruct",
+            "CohereForAI/c4ai-command-r-plus",
+            # Multimodal models
+            "Salesforce/LlavaLlama3-8b-hf",
+            "llava-hf/llava-v1.6-mistral-7b-hf",
+            "llava-hf/llava-v1.6-vicuna-13b-hf",
+            "microsoft/Phi-3-vision-128k-instruct",
+            "google/paligemma-3b-mix-448",
+            # Older but still popular
+            "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+            "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "mistralai/Mistral-7B-Instruct-v0.3",
         ]
+        featured_model_radio = gr.Radio(label="Select a Featured Model", choices=models_list, value="meta-llama/Llama-3.1-8B-Instruct", interactive=True)
+        gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?pipeline_tag=image-to-text&sort=trending)")

+    with gr.Accordion("MCP Settings", open=False):
+        mcp_enabled_checkbox = gr.Checkbox(label="Enable MCP Support", value=False, info="Enable Model Context Protocol support to connect to external tools and services")
         with gr.Row():
+            mcp_server_url = gr.Textbox(label="MCP Server URL", placeholder="https://your-mcp-server.hf.space/gradio_api/mcp/sse", info="URL of the MCP server (usually ends with /gradio_api/mcp/sse for Gradio MCP servers)")
+            mcp_server_name = gr.Textbox(label="Server Name (Optional)", placeholder="e.g., kokoroTTS", info="A friendly name to identify this server")
+            mcp_connect_button = gr.Button("Connect to MCP Server")

+        mcp_status = gr.Textbox(label="MCP Connection Status", placeholder="No MCP servers connected", interactive=False)
+        active_mcp_servers = gr.Dropdown(label="Active MCP Servers for Chat", choices=[], multiselect=True, info="Select which connected MCP servers to make available to the LLM for this chat session")
+        mcp_mode = gr.Radio(label="MCP Interaction Mode", choices=["Natural Language", "Command Mode"], value="Natural Language", info="Choose how to interact with MCP tools")

+        gr.Markdown("""
+        ### MCP Interaction Modes & Examples
+        **Natural Language Mode**: Describe what you want.
+        `Please say 'Hello world' using the kokoroTTS server.`
+        `Use my speech tool to read this: "Welcome"`

+        **Command Mode**: Use structured commands (server name must match connected server's friendly name).
+        `/mcp <server_name> <tool_name> {"param1": "value1"}`
+        Example: `/mcp kokoroTTS text_to_audio {"text": "Hello world", "speed": 1.0}`
+        """)
+
+    # Chat history state:
+    # The chatbot component itself manages the history for display.
+    # The `respond` function receives this display history and reconstructs API history.
+
+    def filter_models_ui_update(search_term):
+        print(f"Filtering models with search term: {search_term}")
+        filtered = [m for m in models_list if search_term.lower() in m.lower()]
+        if not filtered:  # If search yields no results, show all models
+            filtered = models_list
+        print(f"Filtered models: {filtered}")
+        return gr.Radio(choices=filtered, label="Select a Featured Model", value=featured_model_radio.value if featured_model_radio.value in filtered else (filtered[0] if filtered else None))
+
+    def set_custom_model_from_radio_ui_update(selected_featured_model):
+        print(f"Featured model selected: {selected_featured_model}")
+        return selected_featured_model  # This updates the custom_model_box
+
+    def connect_mcp_server_ui_update(url, name_optional):
+        actual_name, status_msg = connect_to_mcp_server(url, name_optional)
+        updated_server_choices = list(mcp_connections.keys())
+        # Keep the existing selection if possible
+        current_selection = active_mcp_servers.value if active_mcp_servers.value else []
+        valid_selection = [s for s in current_selection if s in updated_server_choices]
+        if actual_name and actual_name not in valid_selection:  # Auto-select the newly connected server
+            valid_selection.append(actual_name)

+        return status_msg, gr.Dropdown(choices=updated_server_choices, value=valid_selection, label="Active MCP Servers for Chat")

+    # Processes the user's multimodal input: appends the display form of the turn to the
+    # chatbot history and builds the API-formatted turn that the bot function will consume.
+    def handle_user_input(multimodal_input, history_list: list):
+        text_content = multimodal_input.get("text", "").strip()
+        files = multimodal_input.get("files", [])

+        # This will be the entry for the user's turn in the history
+        user_turn_for_api = []
+        user_turn_for_display = ""

+        if text_content:
+            user_turn_for_api.append({"type": "text", "text": text_content})
+            user_turn_for_display = text_content

+        if files:
+            display_files_md = ""
+            for file_path in files:
+                if file_path and isinstance(file_path, str):  # Gradio provides a temp path
+                    encoded_img = encode_image(file_path)  # For API
+                    if encoded_img:
+                        user_turn_for_api.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded_img}"}})
+                    # For display, note the image inline; Gradio can render this HTML.
+                    display_files_md += f"\n<img src='file={file_path}' style='max-height:150px; display:block;' alt='uploaded image'>"

+            if user_turn_for_display:
+                user_turn_for_display += display_files_md
+            else:
+                user_turn_for_display = display_files_md if display_files_md else "Image(s) uploaded"

+        if not user_turn_for_display and not user_turn_for_api:  # Empty input
+            return history_list, multimodal_input  # No change

+        # The chatbot display history holds [user_display_content, assistant_text] pairs,
+        # while the API-formatted current turn is handed to the bot separately via state.
+        history_list.append([user_turn_for_display, None])
+        return history_list, user_turn_for_api  # Updated history and the API-formatted current message

+    # The bot function that drives the `respond` generator
+    def call_bot_responder(history_list_for_display, current_user_api_content, sys_msg, max_tok, temp, top_p_val, freq_pen, seed_val, prov, api_key_val, cust_model, _search, sel_model, mcp_on, active_servs, mcp_inter_mode):
+        if not current_user_api_content and not (history_list_for_display and history_list_for_display[-1][0]):
+            print("Bot called with no current message and no history, skipping.")
+            yield history_list_for_display  # No change
+            return

+        # Display history is not the API format, so `respond` gets only the text of past
+        # turns plus the current turn's text; images from the current turn are already
+        # base64-encoded in current_user_api_content and cannot be re-passed as file paths.
+        # A more robust design would store the API history separately.
+        current_message_text = ""
+        if isinstance(current_user_api_content, list):  # Multimodal
+            for part in current_user_api_content:
+                if part["type"] == "text":
+                    current_message_text = part["text"]
+        elif isinstance(current_user_api_content, str):  # Text only
+            current_message_text = current_user_api_content

+        # Simplified text-only history for `respond`, which rebuilds its own API history.
+        simplified_past_history = []
+        if len(history_list_for_display) > 1:  # Exclude the current turn
+            for user_disp, assistant_text in history_list_for_display[:-1]:
+                user_text_for_hist = user_disp
+                if isinstance(user_disp, str) and "<img src" in user_disp:  # Basic check for an image display turn
+                    lines = user_disp.splitlines()
+                    text_lines = [line for line in lines if not line.strip().startswith("<img")]
+                    user_text_for_hist = "\n".join(text_lines).strip() if text_lines else ""
+                simplified_past_history.append([user_text_for_hist, assistant_text])

+        bot_response_stream = respond(
+            message=current_message_text,  # Current turn's text
+            image_files=None,  # Current images already live in the API-formatted content (see note above)
+            history=simplified_past_history,  # Past turns
+            system_message=sys_msg,
+            max_tokens=max_tok,
+            temperature=temp,
+            top_p=top_p_val,
+            frequency_penalty=freq_pen,
+            seed=seed_val,
+            provider=prov,
+            custom_api_key=api_key_val,
+            custom_model=cust_model,
+            model_search_term="",  # Not directly used by respond
+            selected_model=sel_model,
+            mcp_enabled=mcp_on,
+            active_mcp_servers=active_servs,
+            mcp_interaction_mode=mcp_inter_mode
+        )

+        for response_chunk in bot_response_stream:
+            history_list_for_display[-1][1] = response_chunk
+            yield history_list_for_display

+    # This state will hold the API-formatted content of the current user message
+    current_api_message_state = gr.State(None)

+    msg.submit(
+        handle_user_input,
+        [msg, chatbot],  # chatbot here is the history_list
+        [chatbot, current_api_message_state]  # Update history display and current_api_message_state
+    ).then(
+        call_bot_responder,
+        [chatbot, current_api_message_state, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
+         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
+         model_search_box, featured_model_radio, mcp_enabled_checkbox, active_mcp_servers, mcp_mode],
+        [chatbot]  # Update chatbot display with streaming response
+    ).then(
+        lambda: gr.MultimodalTextbox(value={"text": "", "files": []}),  # Clear MultimodalTextbox
+        None,
+        [msg]
+    )

+    mcp_connect_button.click(
+        connect_mcp_server_ui_update,
+        [mcp_server_url, mcp_server_name],
+        [mcp_status, active_mcp_servers]
+    )

+    model_search_box.change(fn=filter_models_ui_update, inputs=model_search_box, outputs=featured_model_radio)
+    featured_model_radio.change(fn=set_custom_model_from_radio_ui_update, inputs=featured_model_radio, outputs=custom_model_box)

+    def validate_provider_ui_update(api_key, current_provider):
+        if not api_key.strip() and current_provider != "hf-inference":
+            gr.Info("No API key provided. Defaulting to 'hf-inference' provider.")
+            return gr.Radio(value="hf-inference")  # Update provider_radio
+        return gr.Radio(value=current_provider)  # No change needed, keep current

+    byok_textbox.change(fn=validate_provider_ui_update, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
+    provider_radio.change(fn=validate_provider_ui_update, inputs=[byok_textbox, provider_radio], outputs=provider_radio)

+    print("Gradio interface initialized.")

 if __name__ == "__main__":
+    print("Launching the demo application.")
+    demo.queue().launch(show_api=False, debug=False)  # mcp_server=False as this is a client
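
For reference, a minimal sketch of exercising this commit's MCP client path outside the Gradio UI. It is assumption-laden: the server URL below is illustrative (mirroring the UI placeholder), it presumes app.py is importable from the same directory, and it presumes the connected server exposes a text_to_audio tool with text/speed parameters, as in the kokoroTTS examples above.

# Hypothetical smoke test for the MCP client functions defined in this commit.
from app import connect_to_mcp_server, list_mcp_tools, call_mcp_tool  # assumes app.py is importable

name, status = connect_to_mcp_server(
    "https://your-mcp-server.hf.space/gradio_api/mcp/sse",  # illustrative URL, not a real server
    server_name="kokoroTTS",
)
print(status)

if name:
    # One "- tool_name: description" line per tool, as built by list_mcp_tools
    print(list_mcp_tools(name))
    result = call_mcp_tool(name, "text_to_audio", text="Hello world", speed=1.0)
    if isinstance(result, dict) and result.get("type") == "audio_b64":
        import base64
        with open("out.wav", "wb") as f:
            f.write(base64.b64decode(result["data"]))  # same WAV bytes the chat UI embeds

Note that this exercises the same call_mcp_tool return contract the chat handlers rely on: an "audio_b64" dict for audio, an "error" key on failure, and a plain dict for other results.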