Nymbo committed on
Commit 717cd1f · verified · 1 Parent(s): 6f66243

Update app.py

Files changed (1)
  1. app.py +631 -492
app.py CHANGED
@@ -5,47 +5,44 @@ import json
5
  import base64
6
  from PIL import Image
7
  import io
8
- import requests # Keep for potential future use, though not directly used in core logic now
9
- from smolagents.mcp_client import MCPClient # Ensure this is correctly installed and importable
 
 
 
 
10
 
11
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
12
- if ACCESS_TOKEN:
13
- print("Access token loaded from HF_TOKEN environment variable.")
14
- else:
15
- print("Warning: HF_TOKEN environment variable not set. Some operations might fail.")
16
 
17
  # Function to encode image to base64
18
- def encode_image(image_path_or_pil):
19
- if not image_path_or_pil:
20
- print("No image path or PIL Image provided")
21
  return None
22
 
23
  try:
24
- if isinstance(image_path_or_pil, Image.Image):
25
- image = image_path_or_pil
26
- print(f"Encoding PIL Image object.")
27
- elif isinstance(image_path_or_pil, str):
28
- print(f"Encoding image from path: {image_path_or_pil}")
29
- if not os.path.exists(image_path_or_pil):
30
- print(f"Error: Image file not found at {image_path_or_pil}")
31
- return None
32
- image = Image.open(image_path_or_pil)
33
  else:
34
- print(f"Error: Unsupported image input type: {type(image_path_or_pil)}")
35
- return None
36
 
 
37
  if image.mode == 'RGBA':
38
  image = image.convert('RGB')
39
 
 
40
  buffered = io.BytesIO()
41
- image.save(buffered, format="JPEG") # Or PNG if preferred, ensure consistency
42
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
43
- print("Image encoded successfully to base64.")
44
  return img_str
45
  except Exception as e:
46
  print(f"Error encoding image: {e}")
47
- import traceback
48
- traceback.print_exc()
49
  return None
50
 
51
  # Dictionary to store active MCP connections
@@ -54,606 +51,748 @@ mcp_connections = {}
54
  def connect_to_mcp_server(server_url, server_name=None):
55
  """Connect to an MCP server and return available tools"""
56
  if not server_url:
57
- return None, "No server URL provided. Please enter a valid URL."
58
 
59
  try:
60
- print(f"Attempting to connect to MCP server at URL: {server_url}")
61
- client = MCPClient({"url": server_url}) # This might block or raise if connection fails
62
- tools = client.get_tools() # This should also be a blocking call until tools are fetched
 
63
 
64
- name = server_name.strip() if server_name and server_name.strip() else f"Server_{len(mcp_connections) + 1}"
 
65
  mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}
66
 
67
- print(f"Successfully connected to MCP server: {name} with {len(tools)} tools.")
68
- return name, f"Successfully connected to '{name}' ({server_url}). Found {len(tools)} tool(s)."
69
  except Exception as e:
70
- print(f"Error connecting to MCP server at {server_url}: {e}")
71
- import traceback
72
- traceback.print_exc()
73
- return None, f"Error connecting to MCP server '{server_url}': {str(e)}"
74
 
75
  def list_mcp_tools(server_name):
76
  """List available tools for a connected MCP server"""
77
  if server_name not in mcp_connections:
78
- return "Server not connected or name not found."
79
 
80
  tools = mcp_connections[server_name]["tools"]
81
  tool_info = []
82
  for tool in tools:
83
- tool_info.append(f"- **{tool.name}**: {tool.description}")
84
 
85
  if not tool_info:
86
- return "No tools available for this server."
87
 
88
  return "\n".join(tool_info)
89
 
90
  def call_mcp_tool(server_name, tool_name, **kwargs):
91
- """Call a specific tool from an MCP server and process its result."""
92
  if server_name not in mcp_connections:
93
- return {"type": "error", "message": f"Server '{server_name}' not connected."}
94
 
95
- mcp_client_instance = mcp_connections[server_name]["client"]
 
 
96
 
 
 
 
 
 
97
  try:
98
- print(f"Calling MCP tool: {server_name}.{tool_name} with args: {kwargs}")
99
- # Assuming mcp_client_instance.call_tool returns an mcp.client.tool.ToolResult object
100
- tool_result = mcp_client_instance.call_tool(tool_name, kwargs)
101
-
102
- if tool_result and tool_result.content:
103
- # Process multiple blocks if present, concatenating text or prioritizing audio
104
- audio_block_found = None
105
- text_parts = []
106
- json_parts = []
107
- other_parts = []
108
-
109
- for block in tool_result.content:
110
- if hasattr(block, 'uri') and isinstance(block.uri, str) and block.uri.startswith('data:audio/'):
111
- audio_block_found = {
112
- "type": "audio",
113
- "data_uri": block.uri,
114
- "name": getattr(block, 'name', 'audio_output.wav')
115
- }
116
- break # Prioritize first audio block
117
- elif hasattr(block, 'text') and block.text is not None:
118
- text_parts.append(str(block.text))
119
- elif hasattr(block, 'json_data') and block.json_data is not None:
120
- try:
121
- json_parts.append(json.dumps(block.json_data, indent=2))
122
- except TypeError:
123
- json_parts.append(str(block.json_data)) # Fallback
124
- else:
125
- other_parts.append(str(block))
126
-
127
- if audio_block_found:
128
- print(f"MCP tool returned audio: {audio_block_found['name']}")
129
- return audio_block_found
130
- elif text_parts:
131
- full_text = "\n".join(text_parts)
132
- print(f"MCP tool returned text: {full_text[:100]}...")
133
- return {"type": "text", "value": full_text}
134
- elif json_parts:
135
- full_json_str = "\n".join(json_parts)
136
- print(f"MCP tool returned JSON string.")
137
- return {"type": "json_string", "value": full_json_str} # Treat as string for display
138
- elif other_parts:
139
- print(f"MCP tool returned other content types.")
140
- return {"type": "text", "value": "\n".join(other_parts)}
141
  else:
142
- print("MCP tool executed but returned no interpretable primary content blocks.")
143
- return {"type": "text", "value": "Tool executed, but returned no standard content (audio/text/json)."}
144
 
145
- print("MCP tool executed, but ToolResult or its content was empty.")
146
- return {"type": "text", "value": "Tool executed, but returned no content."}
147
  except Exception as e:
148
- print(f"Error calling MCP tool '{tool_name}' or processing its result: {e}")
149
  import traceback
150
  traceback.print_exc()
151
- return {"type": "error", "message": f"Error during MCP tool '{tool_name}' execution: {str(e)}"}
152
 
153
- def analyze_message_for_tool_call(message, active_mcp_servers, llm_client, llm_model_to_use, base_system_message):
154
  """Analyze a message to determine if an MCP tool should be called"""
155
- if not message or not message.strip() or not active_mcp_servers:
156
  return None, None
157
 
158
- tool_info_for_llm = []
159
- for server_name_iter in active_mcp_servers:
160
- if server_name_iter in mcp_connections:
161
- server_tools = mcp_connections[server_name_iter]["tools"]
162
- for tool in server_tools:
163
- # Provide a concise description for the LLM
164
- tool_info_for_llm.append(
165
- f"- Server: '{server_name_iter}', Tool: '{tool.name}', Description: '{tool.description}'"
166
- )
 
 
 
 
 
 
167
 
168
- if not tool_info_for_llm:
169
- print("No active MCP tools found for analysis.")
170
  return None, None
171
 
172
- tools_string_for_llm = "\n".join(tool_info_for_llm)
 
 
173
 
174
- # More robust system prompt for tool detection
175
- analysis_system_prompt = f"""You are an expert assistant that determines if a user's request requires an external tool.
176
- You have access to the following tools:
177
- {tools_string_for_llm}
178
-
179
- Based on the user's message, decide if any of these tools are appropriate.
180
- If a tool is needed, respond ONLY with a JSON object containing:
181
- "server_name": The name of the server providing the tool.
182
- "tool_name": The name of the tool to be called.
183
- "parameters": A dictionary of parameters for the tool, inferred from the user's message. Ensure parameter names match what the tool expects (often 'text', 'query', 'speed', etc.).
184
-
185
- If NO tool is needed, respond ONLY with the exact string: NO_TOOL_NEEDED
186
-
187
- Example 1 (TTS tool):
188
- User: "Can you say 'hello world' for me at a slightly faster speed?"
189
- Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio", "parameters": {{"text": "hello world", "speed": 1.2}}}}
190
-
191
- Example 2 (File tool):
192
- User: "Read the content of my_document.txt"
193
- Response: {{"server_name": "FileSystemServer", "tool_name": "readFile", "parameters": {{"path": "my_document.txt"}}}}
194
-
195
- Example 3 (No tool):
196
- User: "What's the weather like today?" (Assuming no weather tool is listed)
197
- Response: NO_TOOL_NEEDED
198
-
199
- User's current message is: "{message}"
200
- Now, provide your decision:"""
201
 
202
  try:
203
- print(f"Sending tool analysis request to LLM model: {llm_model_to_use}")
204
- response = llm_client.chat_completion(
205
- model=llm_model_to_use,
206
  messages=[
207
- # {"role": "system", "content": base_system_message}, # Optional: provide original system message for context
208
- {"role": "user", "content": analysis_system_prompt} # The prompt itself is the user message here
209
  ],
210
- temperature=0.1, # Low temperature for deterministic tool selection
211
- max_tokens=300,
212
- stop=["\n\n"] # Stop early if LLM adds extra verbiage
213
  )
214
 
215
- analysis_text = response.choices[0].message.content.strip()
216
- print(f"LLM tool analysis response: '{analysis_text}'")
217
 
218
- if "NO_TOOL_NEEDED" in analysis_text or analysis_text == "NO_TOOL_NEEDED":
219
- print("LLM determined no tool needed.")
220
  return None, None
221
 
222
- # Try to extract JSON from the response (handle potential markdown code blocks)
223
- if analysis_text.startswith("```json"):
224
- analysis_text = analysis_text.replace("```json", "").replace("```", "").strip()
225
- elif analysis_text.startswith("```"):
226
- analysis_text = analysis_text.replace("```", "").strip()
227
-
228
-
229
- json_start = analysis_text.find("{")
230
- json_end = analysis_text.rfind("}") + 1
231
-
232
- if json_start == -1 or json_end <= json_start:
233
- print(f"Could not find valid JSON object in LLM response: '{analysis_text}'")
234
- return None, None
235
-
236
- json_str = analysis_text[json_start:json_end]
237
  try:
238
- tool_call_data = json.loads(json_str)
239
- if "server_name" in tool_call_data and "tool_name" in tool_call_data:
240
- print(f"LLM suggested tool call: {tool_call_data}")
241
- return tool_call_data.get("server_name"), {
242
- "tool_name": tool_call_data.get("tool_name"),
243
- "parameters": tool_call_data.get("parameters", {})
244
  }
245
  else:
246
- print(f"LLM response parsed as JSON but missing server_name or tool_name: {json_str}")
247
  return None, None
248
- except json.JSONDecodeError as e:
249
- print(f"Failed to parse tool call JSON from LLM response: '{json_str}'. Error: {e}")
250
  return None, None
251
 
252
  except Exception as e:
253
- print(f"Error during LLM analysis for tool calls: {str(e)}")
254
- import traceback
255
- traceback.print_exc()
256
  return None, None
257
 
258
  def respond(
259
- message_text_input, # From user function, this is just the text part
260
- message_files_input, # From user function, this is the list of file paths
261
- history_tuples: list[tuple[tuple[str, list], str]], # History: list of ((user_text, [user_files]), assistant_response)
262
- system_message_prompt,
263
- max_tokens_val,
264
- temperature_val,
265
- top_p_val,
266
- frequency_penalty_val,
267
- seed_val,
268
- provider_choice,
269
- custom_api_key_val,
270
- custom_model_id,
271
- # model_search_term_val, # Not directly used in respond, but kept for signature consistency if UI passes it
272
- selected_hf_model_id,
273
- mcp_is_enabled,
274
- active_mcp_server_names, # List of selected server names
275
- mcp_interaction_mode_choice
276
  ):
277
- print(f"\n--- RESPOND FUNCTION CALLED ---")
278
- print(f"Message Text: '{message_text_input}'")
279
- print(f"Message Files: {message_files_input}")
280
- # print(f"History (first item type if exists): {type(history_tuples) if history_tuples else 'No history'}")
281
- print(f"System Prompt: '{system_message_prompt}'")
282
- print(f"Provider: {provider_choice}, MCP Enabled: {mcp_is_enabled}, MCP Mode: {mcp_interaction_mode_choice}")
283
- print(f"Active MCP Servers: {active_mcp_server_names}")
284
-
285
- token_to_use_for_llm = custom_api_key_val if custom_api_key_val.strip() else ACCESS_TOKEN
286
- if not token_to_use_for_llm and provider_choice != "hf-inference": # Basic check
287
- yield "Error: API Key required for non-hf-inference providers."
288
- return
 
 
 
 
289
 
290
- llm_client_instance = InferenceClient(token=token_to_use_for_llm, provider=provider_choice)
 
 
 
291
 
292
- current_seed = None if seed_val == -1 else seed_val
293
- model_id_for_llm = custom_model_id.strip() if custom_model_id.strip() else selected_hf_model_id
294
- print(f"Using LLM model: {model_id_for_llm} via {provider_choice}")
295
-
296
- # --- MCP Tool Call Logic ---
297
- if mcp_is_enabled and (message_text_input or message_files_input) and active_mcp_server_names:
298
- tool_call_output_dict = None
299
- invoked_tool_display_name = "a tool"
300
- invoked_server_display_name = "an MCP server"
301
-
302
- if message_text_input and message_text_input.startswith("/mcp"):
303
- print("Processing explicit MCP command...")
304
- command_parts = message_text_input.split(" ", 3)
305
  if len(command_parts) < 3:
306
  yield "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments_json]"
307
  return
308
 
309
- _, server_name_cmd, tool_name_cmd = command_parts[:3]
310
- invoked_server_display_name = server_name_cmd
311
- invoked_tool_display_name = tool_name_cmd
312
- args_json_str = "{}" if len(command_parts) < 4 else command_parts[3]
313
 
314
  try:
315
- args_dict_cmd = json.loads(args_json_str)
316
- tool_call_output_dict = call_mcp_tool(invoked_server_display_name, invoked_tool_display_name, **args_dict_cmd)
 
317
  except json.JSONDecodeError:
318
- yield f"Invalid JSON arguments for MCP command: {args_json_str}"
319
  return
320
- except Exception as e_cmd:
321
- yield f"Error preparing MCP command: {str(e_cmd)}"
322
  return
323
-
324
- elif mcp_interaction_mode_choice == "Natural Language":
325
- print("Analyzing message for natural language tool call...")
326
- # For natural language, primarily use message_text_input. Files could be context later.
327
- detected_server_nl, tool_info_nl = analyze_message_for_tool_call(
328
- message_text_input,
329
- active_mcp_server_names,
330
- llm_client_instance,
331
- model_id_for_llm,
332
- system_message_prompt
333
  )
334
 
335
- if detected_server_nl and tool_info_nl and tool_info_nl.get("tool_name"):
336
- invoked_server_display_name = detected_server_nl
337
- invoked_tool_display_name = tool_info_nl['tool_name']
338
- tool_params_nl = tool_info_nl.get("parameters", {})
339
- tool_call_output_dict = call_mcp_tool(invoked_server_display_name, invoked_tool_display_name, **tool_params_nl)
340
-
341
- # --- Handle MCP Tool Result (if a tool was called) ---
342
- if tool_call_output_dict:
343
- response_message_parts = [f"I attempted to use the **{invoked_tool_display_name}** tool from **{invoked_server_display_name}**."]
344
-
345
- if tool_call_output_dict.get("type") == "audio":
346
- audio_data_uri = tool_call_output_dict["data_uri"]
347
- audio_html_tag = f"<audio controls src='{audio_data_uri}' title='{tool_call_output_dict.get('name', 'Audio Output')}'></audio>"
348
- response_message_parts.append(f"Here's the audio output:\n{audio_html_tag}")
349
- elif tool_call_output_dict.get("type") == "text":
350
- response_message_parts.append(f"\nResult:\n```\n{tool_call_output_dict['value']}\n```")
351
- elif tool_call_output_dict.get("type") == "json_string": # Changed from "json" to avoid confusion with dict
352
- response_message_parts.append(f"\nResult (JSON):\n```json\n{tool_call_output_dict['value']}\n```")
353
- elif tool_call_output_dict.get("type") == "error":
354
- response_message_parts.append(f"\nUnfortunately, there was an error: {tool_call_output_dict['message']}")
355
- else: # Fallback for unexpected result structure
356
- response_message_parts.append(f"\nThe tool returned: {str(tool_call_output_dict)}")
357
-
358
- yield "\n".join(response_message_parts)
359
- return # End here if a tool was called and processed
 
 
 
 
360
 
361
- # --- Regular LLM Response Logic (if no MCP tool was successfully called and returned primary content) ---
362
- print("Proceeding with standard LLM response generation.")
 
 
363
 
364
- # Prepare current user message for LLM (multimodal if files exist)
365
- current_user_llm_content = []
366
- if message_text_input and message_text_input.strip():
367
- current_user_llm_content.append({"type": "text", "text": message_text_input})
 
368
 
369
- if message_files_input:
370
- for file_path in message_files_input:
371
- if file_path: # file_path is already the actual temp path from gr.File or gr.Image
372
- encoded_img_str = encode_image(file_path)
373
- if encoded_img_str:
374
- current_user_llm_content.append({
375
- "type": "image_url",
376
- "image_url": {"url": f"data:image/jpeg;base64,{encoded_img_str}"}
377
- })
378
- else:
379
- print(f"Warning: Failed to encode image {file_path} for LLM.")
380
-
381
- if not current_user_llm_content:
382
- print("No content (text or valid files) in current user message for LLM.")
383
- yield "" # Or some indicator of no action
384
- return
385
-
386
- # Augment system message with MCP tool info if enabled
387
- augmented_sys_msg = system_message_prompt
388
- if mcp_is_enabled and active_mcp_server_names:
389
- mcp_tool_descriptions_for_llm = []
390
- for server_name_iter in active_mcp_server_names:
391
  if server_name_iter in mcp_connections:
392
- # Use the more detailed list_mcp_tools output for the system prompt if desired
393
- tools_list_str = list_mcp_tools(server_name_iter) # This returns markdown
394
- mcp_tool_descriptions_for_llm.append(f"From server '{server_name_iter}':\n{tools_list_str}")
395
 
396
- if mcp_tool_descriptions_for_llm:
397
- full_tools_info_str = "\n\n".join(mcp_tool_descriptions_for_llm)
398
- interaction_advice = ""
399
- if mcp_interaction_mode_choice == "Command Mode":
400
- interaction_advice = "The user can invoke these tools using '/mcp <server_name> <tool_name> <json_args>'."
401
- # For Natural Language mode, the LLM doesn't need explicit instruction in system prompt
402
- # as `analyze_message_for_tool_call` handles that part.
403
 
404
- augmented_sys_msg += f"\n\nYou also have access to the following external tools via Model Context Protocol (MCP):\n{full_tools_info_str}\n{interaction_advice}"
405
-
406
- # Prepare messages list for LLM
407
- messages_for_llm_api = [{"role": "system", "content": augmented_sys_msg}]
408
 
409
- for hist_user_turn, hist_assist_response in history_tuples:
410
- hist_user_text, hist_user_files = hist_user_turn # Unpack ((text, [files]))
 
 
 
411
 
412
- history_user_llm_content = []
413
- if hist_user_text and hist_user_text.strip():
414
- history_user_llm_content.append({"type": "text", "text": hist_user_text})
415
- if hist_user_files:
416
- for hist_file_path in hist_user_files:
417
- encoded_hist_img = encode_image(hist_file_path)
418
- if encoded_hist_img:
419
- history_user_llm_content.append({
420
- "type": "image_url",
421
- "image_url": {"url": f"data:image/jpeg;base64,{encoded_hist_img}"}
422
- })
423
- if history_user_llm_content: # Only add if there's actual content
424
- messages_for_llm_api.append({"role": "user", "content": history_user_llm_content})
425
 
426
- if hist_assist_response and hist_assist_response.strip():
427
- messages_for_llm_api.append({"role": "assistant", "content": hist_assist_response})
428
 
429
- messages_for_llm_api.append({"role": "user", "content": current_user_llm_content})
430
- # print(f"Final messages for LLM API: {json.dumps(messages_for_llm_api, indent=2)}")
 
 
 
431
 
 
 
432
 
433
- llm_parameters = {
434
- "max_tokens": max_tokens_val, "temperature": temperature_val, "top_p": top_p_val,
435
- "frequency_penalty": frequency_penalty_val,
 
 
436
  }
437
- if current_seed is not None:
438
- llm_parameters["seed"] = current_seed
 
439
 
440
- print(f"Sending request to LLM: Model={model_id_for_llm}, Params={llm_parameters}")
441
- streamed_response_text = ""
442
  try:
443
- llm_stream = llm_client_instance.chat_completion(
444
- model=model_id_for_llm,
445
- messages=messages_for_llm_api,
446
  stream=True,
447
- **llm_parameters
448
  )
449
 
450
- # print("Streaming LLM response: ", end="", flush=True)
451
- for chunk in llm_stream:
 
452
  if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
453
- delta = chunk.choices[0].delta
454
- if hasattr(delta, 'content') and delta.content:
455
- token = delta.content
456
- # print(token, end="", flush=True)
457
- streamed_response_text += token
458
- yield streamed_response_text
459
- # print("\nLLM Stream finished.")
460
- except Exception as e_llm:
461
- error_msg = f"Error during LLM inference: {str(e_llm)}"
462
- print(error_msg)
463
- import traceback
464
- traceback.print_exc()
465
- streamed_response_text += f"\n{error_msg}" # Append error to existing stream if any
466
- yield streamed_response_text
467
 
468
- print(f"--- RESPOND FUNCTION COMPLETED ---")
469
 
470
 
471
  # GRADIO UI
472
- with gr.Blocks(theme="Nymbo/Nymbo_Theme", title="Serverless TextGen Hub + MCP") as demo:
473
- gr.Markdown("# Serverless TextGen Hub with MCP Client")
474
  chatbot = gr.Chatbot(
475
- label="Chat",
476
  height=600,
477
  show_copy_button=True,
478
- placeholder="Select a model, connect MCP servers (optional), and start chatting!",
479
- bubble_full_width=False,
480
- avatar_images=(None, "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo-square.png")
 
481
  )
 
482
 
483
  with gr.Row():
484
- msg_textbox = gr.MultimodalTextbox( # Changed from gr.Textbox to gr.MultimodalTextbox
485
- placeholder="Type a message or upload images... (Use /mcp for commands)",
486
  show_label=False,
487
  container=False,
488
  scale=12,
489
- file_types=["image"], # Can add more types like "audio", "video" if supported by models
490
- file_count="multiple" # Allow multiple image uploads
 
 
491
  )
492
- # submit_button = gr.Button("Send", variant="primary", scale=1, min_width=100) # Optional explicit send button
493
 
494
- with gr.Accordion("LLM Settings", open=False):
495
- system_message_prompt_box = gr.Textbox(
496
- value="You are a helpful and versatile AI assistant. You can understand text and images. If you have access to MCP tools, you can use them when appropriate or when the user asks.",
497
- label="System Prompt", lines=3
 
 
 
 
498
  )
499
 
500
  with gr.Row():
501
  with gr.Column(scale=1):
502
- max_tokens_slider_ui = gr.Slider(minimum=128, maximum=8192, value=1024, step=128, label="Max New Tokens")
503
- temperature_slider_ui = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
504
- top_p_slider_ui = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-P (Nucleus Sampling)")
505
  with gr.Column(scale=1):
506
- frequency_penalty_slider_ui = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.1, label="Frequency Penalty")
507
- seed_slider_ui = gr.Slider(minimum=-1, maximum=65535, value=-1, step=1, label="Seed (-1 for random)")
508
 
509
- providers_list_ui = [
510
- "hf-inference", "cerebras", "together", "sambanova", "novita",
511
- "cohere", "fireworks-ai", "hyperbolic", "nebius",
512
- ]
513
- provider_radio_ui = gr.Radio(choices=providers_list_ui, value="hf-inference", label="Inference Provider")
514
-
515
- byok_textbox_ui = gr.Textbox(label="Your Hugging Face API Key (Optional)", placeholder="Enter HF Token if using non-hf-inference providers or private models", type="password")
516
-
517
- custom_model_id_box = gr.Textbox(label="Custom Model ID (Overrides selection below)", placeholder="e.g., meta-llama/Llama-3-8B-Instruct")
518
 
519
- model_search_box_ui = gr.Textbox(label="Filter Featured Models", placeholder="Search...", lines=1)
520
 
521
- # More diverse model list, including some known multimodal ones
522
- featured_models_list_data = [
523
- "meta-llama/Meta-Llama-3.1-8B-Instruct", # Good default
524
- "meta-llama/Meta-Llama-3.1-70B-Instruct",
525
  "mistralai/Mistral-Nemo-Instruct-2407",
526
- "mistralai/Mixtral-8x22B-Instruct-v0.1",
527
- "Qwen/Qwen2-7B-Instruct",
528
- "microsoft/Phi-3-medium-128k-instruct",
529
- # Multimodal
530
- "Salesforce/blip-image-captioning-large", # Example, might not be chat
531
- "llava-hf/llava-1.5-7b-hf", # LLaVA example
532
- "microsoft/kosmos-2-patch14-224", # Kosmos-2
533
- "google/paligemma-3b-mix-448", # PaliGemma
 
 
 
 
 
534
  ]
535
- featured_model_radio_ui = gr.Radio(label="Select a Featured Model", choices=featured_models_list_data, value="meta-llama/Meta-Llama-3.1-8B-Instruct", interactive=True)
536
-
537
- gr.Markdown("Tip: For multimodal chat, ensure selected model supports image inputs (e.g., LLaVA, PaliGemma, Kosmos-2).")
538
 
539
- with gr.Accordion("MCP Client Settings", open=False):
540
- mcp_enabled_checkbox_ui = gr.Checkbox(label="Enable MCP Support", value=False, info="Connect to external tools and services via MCP.")
541
-
542
  with gr.Row():
543
- mcp_server_url_textbox = gr.Textbox(label="MCP Server URL", placeholder="e.g., https://your-mcp-server.hf.space/gradio_api/mcp/sse")
544
- mcp_server_name_textbox = gr.Textbox(label="Friendly Server Name (Optional)", placeholder="MyTTS_Server")
545
- mcp_connect_button_ui = gr.Button("Connect", variant="secondary")
546
 
547
- mcp_connection_status_textbox = gr.Textbox(label="MCP Connection Status", placeholder="No MCP servers connected.", interactive=False, lines=2)
 
 
548
 
549
- active_mcp_servers_dropdown = gr.Dropdown(
550
- label="Use Tools From (Select Active MCP Servers)", choices=[], multiselect=True,
551
- info="Choose which connected servers the LLM can use tools from."
552
- )
 
553
 
554
- mcp_interaction_mode_radio = gr.Radio(
555
- label="MCP Interaction Mode", choices=["Natural Language", "Command Mode"], value="Natural Language",
556
- info="Natural Language: AI tries to detect tool use. Command Mode: Use '/mcp ...' syntax."
557
- )
558
- gr.Markdown("Example MCP Command: `/mcp MyTTS text_to_audio {\"text\": \"Hello world!\"}`")
559
 
560
- # --- Event Handlers ---
561
-
562
- # Store history as list of tuples: [ ((user_text, [user_files]), assistant_response), ... ]
563
- chat_history_state = gr.State([])
564
 
565
- def user_interaction(user_multimodal_input, current_chat_history):
566
- user_text = user_multimodal_input["text"] if user_multimodal_input and "text" in user_multimodal_input else ""
567
- user_files = user_multimodal_input["files"] if user_multimodal_input and "files" in user_multimodal_input else []
568
 
569
- # Only add to history if there's text or files
570
- if user_text or user_files:
571
- current_chat_history.append( ((user_text, user_files), None) ) # Append user turn, assistant response is None initially
572
- return current_chat_history, gr.update(value={"text": "", "files": []}) # Clear input textbox
573
-
574
- def bot_response_generator(
575
- current_chat_history, system_prompt, max_tokens, temp, top_p_val, freq_penalty, seed_val,
576
- provider_val, api_key_val, custom_model_val, selected_model_val, # Removed search_term as it's not directly used by respond
577
- mcp_enabled_val, active_servers_val, mcp_mode_val
578
- ):
579
- if not current_chat_history or current_chat_history[-1][1] is not None: # If no user message or last message already has bot response
580
- yield current_chat_history # Or simply `return current_chat_history` if not streaming
581
- return
582
 
583
- user_turn_content, _ = current_chat_history[-1] # Get the latest user turn: (text, [files])
584
- message_text, message_files = user_turn_content
585
 
586
- # The history passed to `respond` should be all turns *before* the current one
587
- history_for_respond = current_chat_history[:-1]
 
 
 
 
588
 
589
- response_stream = respond(
590
- message_text, message_files, history_for_respond,
591
- system_prompt, max_tokens, temp, top_p_val, freq_penalty, seed_val,
592
- provider_val, api_key_val, custom_model_val, selected_model_val,
593
- mcp_enabled_val, active_servers_val, mcp_mode_val
594
- )
 
595
 
596
- full_bot_message = ""
597
- for chunk in response_stream:
598
- full_bot_message = chunk
599
- current_chat_history[-1] = (user_turn_content, full_bot_message) # Update last item's assistant part
600
- yield current_chat_history
601
-
602
- # Link UI components to functions
603
- msg_textbox.submit(
604
- user_interaction,
605
- inputs=[msg_textbox, chat_history_state],
606
- outputs=[chat_history_state, msg_textbox] # Update history and clear input
607
- ).then(
608
- bot_response_generator,
609
- inputs=[
610
- chat_history_state, system_message_prompt_box, max_tokens_slider_ui, temperature_slider_ui,
611
- top_p_slider_ui, frequency_penalty_slider_ui, seed_slider_ui, provider_radio_ui,
612
- byok_textbox_ui, custom_model_id_box, featured_model_radio_ui,
613
- mcp_enabled_checkbox_ui, active_mcp_servers_dropdown, mcp_interaction_mode_radio
614
- ],
615
- outputs=[chatbot] # Stream to chatbot
616
- )
617
 
618
- # MCP Connection
619
- def handle_mcp_connect(url, name_suggestion):
620
- if not url or not url.strip():
621
- return "MCP Server URL cannot be empty.", gr.update(choices=list(mcp_connections.keys()))
622
 
623
- _, status_msg = connect_to_mcp_server(url, name_suggestion)
624
- # Update dropdown choices with current server names
625
- new_choices = list(mcp_connections.keys())
626
- # Preserve selected values if they are still valid connections
627
- # current_selected = active_mcp_servers_dropdown.value # This might not work directly
628
- # new_selected = [s for s in current_selected if s in new_choices]
629
- return status_msg, gr.update(choices=new_choices) #, value=new_selected)
630
-
631
- mcp_connect_button_ui.click(
632
- handle_mcp_connect,
633
- inputs=[mcp_server_url_textbox, mcp_server_name_textbox],
634
- outputs=[mcp_connection_status_textbox, active_mcp_servers_dropdown]
635
- )
636
 
637
- # Model Filtering
638
- def filter_featured_models(search_query):
639
- if not search_query:
640
- return gr.update(choices=featured_models_list_data)
641
- filtered = [m for m in featured_models_list_data if search_query.lower() in m.lower()]
642
- return gr.update(choices=filtered if filtered else ["No models match your search"])
643
 
644
- model_search_box_ui.change(filter_featured_models, inputs=model_search_box_ui, outputs=featured_model_radio_ui)
 
645
 
646
- # Auto-select hf-inference if BYOK is empty and other provider is chosen
647
- def validate_api_key_for_provider(api_key_text, current_provider):
648
- if not api_key_text.strip() and current_provider != "hf-inference":
649
- gr.Warning("API Key needed for non-hf-inference providers. Defaulting to hf-inference.")
650
- return gr.update(value="hf-inference")
651
- return current_provider # No change if key provided or hf-inference selected
652
 
653
- byok_textbox_ui.change(validate_api_key_for_provider, inputs=[byok_textbox_ui, provider_radio_ui], outputs=provider_radio_ui)
654
- provider_radio_ui.change(validate_api_key_for_provider, inputs=[byok_textbox_ui, provider_radio_ui], outputs=provider_radio_ui)
655
 
 
656
 
657
  if __name__ == "__main__":
658
- print("Launching Gradio demo...")
659
- demo.queue().launch(debug=True, show_api=False) # mcp_server=False as this is a client app
 
5
  import base64
6
  from PIL import Image
7
  import io
8
+ import requests
9
+ from smolagents.mcp_client import MCPClient
10
+ from mcp import ToolResult # For type hinting, good practice
11
+ from mcp.common.content_block import ValueContentBlock # To access the actual tool return value
12
+ import numpy as np # For handling audio array
13
+ import soundfile as sf # For converting audio array to WAV
14
 
15
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
16
+ print("Access token loaded.")
 
 
 
17
 
18
  # Function to encode image to base64
19
+ def encode_image(image_path):
20
+ if not image_path:
21
+ print("No image path provided")
22
  return None
23
 
24
  try:
25
+ print(f"Encoding image from path: {image_path}")
26
+
27
+ # If it's already a PIL Image
28
+ if isinstance(image_path, Image.Image):
29
+ image = image_path
 
 
 
 
30
  else:
31
+ # Try to open the image file
32
+ image = Image.open(image_path)
33
 
34
+ # Convert to RGB if image has an alpha channel (RGBA)
35
  if image.mode == 'RGBA':
36
  image = image.convert('RGB')
37
 
38
+ # Encode to base64
39
  buffered = io.BytesIO()
40
+ image.save(buffered, format="JPEG")
41
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
42
+ print("Image encoded successfully")
43
  return img_str
44
  except Exception as e:
45
  print(f"Error encoding image: {e}")
 
 
46
  return None
47
 
48
  # Dictionary to store active MCP connections
 
51
  def connect_to_mcp_server(server_url, server_name=None):
52
  """Connect to an MCP server and return available tools"""
53
  if not server_url:
54
+ return None, "No server URL provided"
55
 
56
  try:
57
+ # Create an MCP client and connect to the server
58
+ client = MCPClient({"url": server_url})
59
+ # Get available tools
60
+ tools = client.get_tools()
61
 
62
+ # Store the connection for later use
63
+ name = server_name or f"Server_{len(mcp_connections)}"
64
  mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}
65
 
66
+ return name, f"Successfully connected to {name} with {len(tools)} available tools"
 
67
  except Exception as e:
68
+ print(f"Error connecting to MCP server: {e}")
69
+ return None, f"Error connecting to MCP server: {str(e)}"
 
 
70
 
71
  def list_mcp_tools(server_name):
72
  """List available tools for a connected MCP server"""
73
  if server_name not in mcp_connections:
74
+ return "Server not connected"
75
 
76
  tools = mcp_connections[server_name]["tools"]
77
  tool_info = []
78
  for tool in tools:
79
+ tool_info.append(f"- {tool.name}: {tool.description}")
80
 
81
  if not tool_info:
82
+ return "No tools available for this server"
83
 
84
  return "\n".join(tool_info)
85
 
86
  def call_mcp_tool(server_name, tool_name, **kwargs):
87
+ """Call a specific tool from an MCP server"""
88
  if server_name not in mcp_connections:
89
+ return {"error": f"Server '{server_name}' not connected"} # Return dict for consistency
90
 
91
+ client_data = mcp_connections[server_name]
92
+ client = client_data["client"]
93
+ server_tools = client_data["tools"]
94
 
95
+ # Find the requested tool
96
+ tool = next((t for t in server_tools if t.name == tool_name), None)
97
+ if not tool:
98
+ return {"error": f"Tool '{tool_name}' not found on server '{server_name}'"}
99
+
100
  try:
101
+ # Call the tool with provided arguments
102
+ mcp_tool_result: ToolResult = client.call_tool(tool_name=tool_name, arguments=kwargs)
103
+
104
+ actual_result = None
105
+ if mcp_tool_result.content:
106
+ content_block = mcp_tool_result.content[0]
107
+ if isinstance(content_block, ValueContentBlock):
108
+ actual_result = content_block.value
109
+ elif hasattr(content_block, 'text'): # e.g., TextContentBlock
110
+ actual_result = content_block.text
 
111
  else:
112
+ actual_result = str(content_block) # Fallback
113
+ else: # No content
114
+ return {"warning": "Tool returned no content."}
115
+
116
+
117
+ # Special handling for audio result (e.g., from Kokoro TTS)
118
+ # This checks if the result is a tuple (sample_rate, audio_data_list)
119
+ # Gradio MCP server serializes numpy arrays to lists.
120
+ if (server_name == "kokoroTTS" and tool_name == "text_to_audio" and
121
+ isinstance(actual_result, tuple) and len(actual_result) == 2 and
122
+ isinstance(actual_result[0], int) and
123
+ (isinstance(actual_result[1], list) or isinstance(actual_result[1], np.ndarray))):
124
+
125
+ print(f"Received audio data from {server_name}.{tool_name}")
126
+ sample_rate, audio_data_list = actual_result
127
+
128
+ # Convert list to numpy array if necessary
129
+ audio_data = np.array(audio_data_list)
130
+
131
+ # Ensure correct dtype for soundfile (float32 is common, or int16)
132
+ # Kokoro returns float, likely in [-1, 1] range.
133
+ if audio_data.dtype != np.float32 and audio_data.dtype != np.int16:
134
+ # Attempt to normalize if it looks like it's not in [-1, 1] for float
135
+ if np.issubdtype(audio_data.dtype, np.floating) and (np.min(audio_data) < -1.1 or np.max(audio_data) > 1.1):
136
+ print(f"Warning: Audio data for {server_name}.{tool_name} might not be normalized. Min: {np.min(audio_data)}, Max: {np.max(audio_data)}")
137
+ audio_data = audio_data.astype(np.float32)
138
+
139
+ wav_io = io.BytesIO()
140
+ sf.write(wav_io, audio_data, sample_rate, format='WAV')
141
+ wav_io.seek(0)
142
+
143
+ wav_b64 = base64.b64encode(wav_io.read()).decode('utf-8')
144
+
145
+ return {
146
+ "type": "audio_b64",
147
+ "data": wav_b64,
148
+ "message": f"Audio generated by {server_name}.{tool_name}"
149
+ }
150
+
151
+ # Handle other types of results
152
+ if isinstance(actual_result, dict):
153
+ return actual_result
154
+ elif isinstance(actual_result, str):
155
+ try: # If string is JSON, parse to dict
156
+ return json.loads(actual_result)
157
+ except json.JSONDecodeError:
158
+ return {"text": actual_result} # Wrap raw string
159
+ else:
160
+ return {"value": str(actual_result)} # Fallback for other primitive types
161
 
 
 
162
  except Exception as e:
163
+ print(f"Error calling MCP tool: {e}")
164
  import traceback
165
  traceback.print_exc()
166
+ return {"error": f"Error calling MCP tool: {str(e)}"}
167
 
168
+ def analyze_message_for_tool_call(message, active_mcp_servers, client, model_to_use, system_message):
169
  """Analyze a message to determine if an MCP tool should be called"""
170
+ if not message or not message.strip():
171
  return None, None
172
 
173
+ tool_info = []
174
+ for server_name in active_mcp_servers:
175
+ if server_name in mcp_connections:
176
+ server_tools_raw = list_mcp_tools(server_name) # This returns a string
177
+ if server_tools_raw != "Server not connected" and server_tools_raw != "No tools available for this server":
178
+ # Parse the string from list_mcp_tools
179
+ for line in server_tools_raw.split("\n"):
180
+ if line.startswith("- "):
181
+ parts = line[2:].split(":", 1)
182
+ if len(parts) == 2:
183
+ tool_info.append({
184
+ "server_name": server_name,
185
+ "tool_name": parts[0].strip(),
186
+ "description": parts[1].strip()
187
+ })
188
 
189
+ if not tool_info:
 
190
  return None, None
191
 
192
+ tools_desc = []
193
+ for info in tool_info:
194
+ tools_desc.append(f"{info['server_name']}.{info['tool_name']}: {info['description']}")
195
 
196
+ tools_string = "\n".join(tools_desc)
197
+
198
+ analysis_system_prompt = f"""You are an assistant that helps determine if a user message requires using an external tool.
199
+ Available tools:
200
+ {tools_string}
201
+
202
+ Your job is to:
203
+ 1. Analyze the user's message.
204
+ 2. Determine if they're asking to use one of the tools.
205
+ 3. If yes, respond ONLY with a JSON object with "server_name", "tool_name", and "parameters".
206
+ 4. If no, respond ONLY with the exact string "NO_TOOL_NEEDED".
207
+
208
+ Example 1 (User wants TTS):
209
+ User: "Please turn this text into speech: Hello world"
210
+ Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio", "parameters": {{"text": "Hello world", "speed": 1.0}}}}
211
+
212
+ Example 2 (User wants TTS with different server name):
213
+ User: "Use mySpeechTool to say 'good morning'"
214
+ Response: {{"server_name": "mySpeechTool", "tool_name": "text_to_audio", "parameters": {{"text": "good morning"}}}}
215
+
216
+ Example 3 (User does not want a tool):
217
+ User: "What is the capital of France?"
218
+ Response: NO_TOOL_NEEDED"""
 
 
 
 
219
 
220
  try:
221
+ response = client.chat_completion(
222
+ model=model_to_use,
 
223
  messages=[
224
+ {"role": "system", "content": analysis_system_prompt},
225
+ {"role": "user", "content": message}
226
  ],
227
+ temperature=0.1,
228
+ max_tokens=300
 
229
  )
230
 
231
+ analysis = response.choices[0].message.content.strip()
232
+ print(f"Tool analysis LLM response: '{analysis}'")
233
 
234
+ if analysis == "NO_TOOL_NEEDED":
 
235
  return None, None
236
 
237
  try:
238
+ tool_call = json.loads(analysis)
239
+ if isinstance(tool_call, dict) and "server_name" in tool_call and "tool_name" in tool_call:
240
+ return tool_call.get("server_name"), {
241
+ "tool_name": tool_call.get("tool_name"),
242
+ "parameters": tool_call.get("parameters", {})
 
243
  }
244
  else:
245
+ print(f"LLM response for tool call was not a valid JSON with required keys: {analysis}")
246
  return None, None
247
+ except json.JSONDecodeError:
248
+ print(f"Failed to parse tool call JSON from LLM: {analysis}")
249
  return None, None
250
 
251
  except Exception as e:
252
+ print(f"Error analyzing message for tool calls: {str(e)}")
 
 
253
  return None, None
254
 
255
  def respond(
256
+ message,
257
+ image_files,
258
+ history: list[tuple[str, str]],
259
+ system_message,
260
+ max_tokens,
261
+ temperature,
262
+ top_p,
263
+ frequency_penalty,
264
+ seed,
265
+ provider,
266
+ custom_api_key,
267
+ custom_model,
268
+ model_search_term,
269
+ selected_model,
270
+ mcp_enabled=False,
271
+ active_mcp_servers=None,
272
+ mcp_interaction_mode="Natural Language"
273
  ):
274
+ print(f"Received message: {message}")
275
+ print(f"Received {len(image_files) if image_files else 0} images")
276
+ # print(f"History: {history}") # Can be verbose
277
+ print(f"System message: {system_message}")
278
+ print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
279
+ print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
280
+ print(f"Selected provider: {provider}")
281
+ print(f"Custom API Key provided: {bool(custom_api_key.strip())}")
282
+ print(f"Selected model (custom_model): {custom_model}")
283
+ print(f"Model search term: {model_search_term}")
284
+ print(f"Selected model from radio: {selected_model}")
285
+ print(f"MCP enabled: {mcp_enabled}")
286
+ print(f"Active MCP servers: {active_mcp_servers}")
287
+ print(f"MCP interaction mode: {mcp_interaction_mode}")
288
+
289
+ token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
290
 
291
+ if custom_api_key.strip() != "":
292
+ print("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
293
+ else:
294
+ print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
295
 
296
+ client = InferenceClient(token=token_to_use, provider=provider)
297
+ print(f"Hugging Face Inference Client initialized with {provider} provider.")
298
+
299
+ if seed == -1:
300
+ seed = None
301
+
302
+ model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
303
+ print(f"Model selected for inference: {model_to_use}")
304
+
305
+ if mcp_enabled and message:
306
+ if message.startswith("/mcp"):
307
+ command_parts = message.split(" ", 3)
 
308
  if len(command_parts) < 3:
309
  yield "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments_json]"
310
  return
311
 
312
+ _, server_name, tool_name = command_parts[:3]
313
+ args_json = "{}" if len(command_parts) < 4 else command_parts[3]
 
 
314
 
315
  try:
316
+ args_dict = json.loads(args_json)
317
+ result = call_mcp_tool(server_name, tool_name, **args_dict)
318
+
319
+ if isinstance(result, dict) and result.get("type") == "audio_b64":
320
+ yield f"<audio controls src=\"data:audio/wav;base64,{result.get('data')}\"></audio>"
321
+ elif isinstance(result, dict) and "error" in result:
322
+ yield f"Error: {result.get('error')}"
323
+ elif isinstance(result, dict):
324
+ yield json.dumps(result, indent=2)
325
+ else:
326
+ yield str(result)
327
+ return
328
  except json.JSONDecodeError:
329
+ yield f"Invalid JSON arguments: {args_json}"
330
  return
331
+ except Exception as e:
332
+ yield f"Error executing MCP command: {str(e)}"
333
  return
334
+ elif mcp_interaction_mode == "Natural Language" and active_mcp_servers:
335
+ print("Attempting natural language tool call detection...")
336
+ server_name, tool_info = analyze_message_for_tool_call(
337
+ message, active_mcp_servers, client, model_to_use, system_message
 
 
 
 
 
 
338
  )
339
 
340
+ if server_name and tool_info and tool_info.get("tool_name"):
341
+ try:
342
+ print(f"Calling tool via natural language: {server_name}.{tool_info['tool_name']} with parameters: {tool_info['parameters']}")
343
+ result = call_mcp_tool(server_name, tool_info['tool_name'], **tool_info.get('parameters', {}))
344
+
345
+ response_message = f"I used the **{tool_info['tool_name']}** tool from **{server_name}**."
346
+ if isinstance(result, dict) and result.get("message"):
347
+ response_message += f" ({result.get('message')})"
348
+ response_message += "\n\n"
349
+
350
+ if isinstance(result, dict) and result.get("type") == "audio_b64":
351
+ audio_html = f"<audio controls src=\"data:audio/wav;base64,{result.get('data')}\"></audio>"
352
+ yield response_message + audio_html
353
+ elif isinstance(result, dict) and "error" in result:
354
+ result_str = f"Tool Error: {result.get('error')}"
355
+ yield response_message + result_str
356
+ elif isinstance(result, dict):
357
+ result_str = f"Result:\n```json\n{json.dumps(result, indent=2)}\n```"
358
+ yield response_message + result_str
359
+ else:
360
+ result_str = f"Result:\n{str(result)}"
361
+ yield response_message + result_str
362
+ return
363
+ except Exception as e:
364
+ print(f"Error executing MCP tool via natural language: {str(e)}")
365
+ # yield f"Sorry, I encountered an error trying to use the tool: {str(e)}"
366
+ # Fall through to normal LLM response if tool call fails here
367
+ else:
368
+ print("No tool call detected by natural language analysis or tool_info incomplete.")
369
 
370
+
371
+ user_content_parts = []
372
+ if message and message.strip():
373
+ user_content_parts.append({"type": "text", "text": message})
374
 
375
+ if image_files and len(image_files) > 0:
376
+ for img_path in image_files:
377
+ if img_path:
378
+ try:
379
+ encoded_image = encode_image(img_path)
380
+ if encoded_image:
381
+ user_content_parts.append({
382
+ "type": "image_url",
383
+ "image_url": {"url": f"data:image/jpeg;base64,{encoded_image}"}
384
+ })
385
+ except Exception as e:
386
+ print(f"Error encoding image {img_path}: {e}")
387
 
388
+ if not user_content_parts: # If message was only /mcp command and processed
389
+ print("No further content for LLM after MCP command processing.")
390
+ # This might happen if an MCP command was fully handled and returned.
391
+ # If yield was used, the function already exited. If not, we might need to ensure no LLM call.
392
+ # However, the logic above for MCP commands uses `yield ...; return`, so this path might not be hit often.
393
+ # If it *is* hit, it means the MCP command didn't yield, and we should not proceed to LLM.
394
+ if message and message.startswith("/mcp"):
395
+ return # Ensure we don't fall through after a command that should have yielded.
396
+
397
+
398
+ final_user_content = user_content_parts if len(user_content_parts) > 1 else (user_content_parts[0] if user_content_parts else "")
399
+
400
+ augmented_system_message = system_message
401
+ if mcp_enabled and active_mcp_servers:
402
+ tool_list_for_prompt = []
403
+ for server_name_iter in active_mcp_servers:
 
 
 
 
 
 
404
  if server_name_iter in mcp_connections:
405
+ server_tools_str = list_mcp_tools(server_name_iter)
406
+ if server_tools_str and "not connected" not in server_tools_str and "No tools available" not in server_tools_str:
407
+ tool_list_for_prompt.append(f"From server '{server_name_iter}':\n{server_tools_str}")
408
 
409
+ if tool_list_for_prompt:
410
+ mcp_tools_description = "\n\n".join(tool_list_for_prompt)
 
 
 
 
 
411
 
412
+ if mcp_interaction_mode == "Command Mode":
413
+ augmented_system_message += f"\n\nYou have access to the following MCP tools. To use them, type a command in the format: /mcp <server_name> <tool_name> <arguments_json>\nTools:\n{mcp_tools_description}"
414
+ else: # Natural Language
415
+ augmented_system_message += f"\n\nYou have access to the following MCP tools. You can ask to use them in natural language, and I will try to detect when a tool is needed. If I miss it, you can try being more explicit about the tool name.\nTools:\n{mcp_tools_description}"
416
 
417
+ messages_for_api = [{"role": "system", "content": augmented_system_message}]
418
+ print("Initial messages array constructed.")
419
+
420
+ for val in history:
421
+ past_user_msg, past_assistant_msg = val
422
 
423
+ # Handle past user messages (could be text or multimodal)
424
+ if past_user_msg:
425
+ if isinstance(past_user_msg, list): # Already multimodal
426
+ messages_for_api.append({"role": "user", "content": past_user_msg})
427
+ elif isinstance(past_user_msg, str): # Text only
428
+ messages_for_api.append({"role": "user", "content": past_user_msg})
 
 
 
 
 
 
 
429
 
430
+ if past_assistant_msg:
431
+ messages_for_api.append({"role": "assistant", "content": past_assistant_msg})
432
 
433
+ if final_user_content: # Add current user message if it exists
434
+ messages_for_api.append({"role": "user", "content": final_user_content})
435
+
436
+ print(f"Latest user message appended (content type: {type(final_user_content)})")
437
+ # print(f"Full messages_for_api: {json.dumps(messages_for_api, indent=2)}") # Can be very verbose
438
 
439
+ llm_response_text = ""
440
+ print(f"Sending request to {provider} provider for model {model_to_use}.")
441
 
442
+ parameters = {
443
+ "max_tokens": max_tokens,
444
+ "temperature": temperature,
445
+ "top_p": top_p,
446
+ "frequency_penalty": frequency_penalty,
447
  }
448
+
449
+ if seed is not None:
450
+ parameters["seed"] = seed
451
 
 
 
452
  try:
453
+ stream = client.chat_completion(
454
+ model=model_to_use,
455
+ messages=messages_for_api,
456
  stream=True,
457
+ **parameters
458
  )
459
 
460
+ # print("Received tokens: ", end="", flush=True) # Can be too noisy
461
+
462
+ for chunk in stream:
463
  if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
464
+ if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
465
+ token_text = chunk.choices[0].delta.content
466
+ if token_text:
467
+ # print(token_text, end="", flush=True) # Can be too noisy
468
+ llm_response_text += token_text
469
+ yield llm_response_text
470
+
471
+ # print() # Newline after tokens
472
+ except Exception as e:
473
+ print(f"Error during LLM inference: {e}")
474
+ llm_response_text += f"\nLLM Error: {str(e)}"
475
+ yield llm_response_text
 
 
476
 
477
+ print("Completed LLM response generation.")
478
 
479
 
480
  # GRADIO UI
481
+ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
482
  chatbot = gr.Chatbot(
 
483
  height=600,
484
  show_copy_button=True,
485
+ placeholder="Select a model and begin chatting. Supports multiple inference providers, multimodal inputs, and MCP tools.",
486
+ layout="panel",
487
+ show_label=False,
488
+ render=False # Delay rendering
489
  )
490
+ print("Chatbot interface created.")
491
 
492
  with gr.Row():
493
+ msg = gr.MultimodalTextbox(
494
+ placeholder="Type a message or upload images...",
495
  show_label=False,
496
  container=False,
497
  scale=12,
498
+ file_types=["image"],
499
+ file_count="multiple",
500
+ sources=["upload"],
501
+ render=False # Delay rendering
502
  )
 
503
 
504
+ chatbot.render()
505
+ msg.render()
506
+
507
+ with gr.Accordion("Settings", open=False):
508
+ system_message_box = gr.Textbox(
509
+ value="You are a helpful AI assistant that can understand images and text. If the user asks you to use a tool, try your best.",
510
+ placeholder="You are a helpful assistant.",
511
+ label="System Prompt"
512
  )
513
 
514
  with gr.Row():
515
  with gr.Column(scale=1):
516
+ max_tokens_slider = gr.Slider(minimum=1, maximum=8192, value=1024, step=1, label="Max tokens")
517
+ temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.01, label="Temperature")
518
+ top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-P")
519
  with gr.Column(scale=1):
520
+ frequency_penalty_slider = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.1, label="Frequency Penalty")
521
+ seed_slider = gr.Slider(minimum=-1, maximum=65535, value=-1, step=1, label="Seed (-1 for random)")
522
 
523
+ providers_list = ["hf-inference", "cerebras", "together", "sambanova", "novita", "cohere", "fireworks-ai", "hyperbolic", "nebius"]
524
+ provider_radio = gr.Radio(choices=providers_list, value="hf-inference", label="Inference Provider")
525
+ byok_textbox = gr.Textbox(value="", label="BYOK (Bring Your Own Key)", info="Enter a custom Hugging Face API key here. If empty, only 'hf-inference' provider can be used with the shared token.", placeholder="Enter your Hugging Face API token", type="password")
526
+ custom_model_box = gr.Textbox(value="", label="Custom Model ID", info="(Optional) Provide a custom Hugging Face model ID. Overrides selected featured model.", placeholder="meta-llama/Llama-3.1-70B-Instruct")
 
 
 
 
 
527
 
528
+ model_search_box = gr.Textbox(label="Filter Featured Models", placeholder="Search for a featured model...", lines=1)
529
 
530
+ models_list = [
531
+ "meta-llama/Llama-3.1-405B-Instruct-FP8", # Large model, might be slow/expensive
532
+ "meta-llama/Llama-3.1-70B-Instruct",
533
+ "meta-llama/Llama-3.1-8B-Instruct",
534
  "mistralai/Mistral-Nemo-Instruct-2407",
535
+ "Qwen/Qwen2-72B-Instruct",
536
+ "Qwen/Qwen2-57B-A14B-Instruct",
537
+ "CohereForAI/c4ai-command-r-plus",
538
+ # Multimodal models
539
+ "Salesforce/LlavaLlama3-8b-hf",
540
+ "llava-hf/llava-v1.6-mistral-7b-hf",
541
+ "llava-hf/llava-v1.6-vicuna-13b-hf",
542
+ "microsoft/Phi-3-vision-128k-instruct",
543
+ "google/paligemma-3b-mix-448",
544
+ # Older but still popular
545
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
546
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
547
+ "mistralai/Mistral-7B-Instruct-v0.3",
548
  ]
549
+ featured_model_radio = gr.Radio(label="Select a Featured Model", choices=models_list, value="meta-llama/Llama-3.1-8B-Instruct", interactive=True)
550
+ gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?pipeline_tag=image-to-text&sort=trending)")
 
551
 
552
+ with gr.Accordion("MCP Settings", open=False):
553
+ mcp_enabled_checkbox = gr.Checkbox(label="Enable MCP Support", value=False, info="Enable Model Context Protocol support to connect to external tools and services")
 
554
  with gr.Row():
555
+ mcp_server_url = gr.Textbox(label="MCP Server URL", placeholder="https://your-mcp-server.hf.space/gradio_api/mcp/sse", info="URL of the MCP server (usually ends with /gradio_api/mcp/sse for Gradio MCP servers)")
556
+ mcp_server_name = gr.Textbox(label="Server Name (Optional)", placeholder="e.g., kokoroTTS", info="A friendly name to identify this server")
557
+ mcp_connect_button = gr.Button("Connect to MCP Server")
558
 
559
+ mcp_status = gr.Textbox(label="MCP Connection Status", placeholder="No MCP servers connected", interactive=False)
560
+ active_mcp_servers = gr.Dropdown(label="Active MCP Servers for Chat", choices=[], multiselect=True, info="Select which connected MCP servers to make available to the LLM for this chat session")
561
+ mcp_mode = gr.Radio(label="MCP Interaction Mode", choices=["Natural Language", "Command Mode"], value="Natural Language", info="Choose how to interact with MCP tools")
562
 
563
+ gr.Markdown("""
564
+ ### MCP Interaction Modes & Examples
565
+ **Natural Language Mode**: Describe what you want.
566
+ `Please say 'Hello world' using the kokoroTTS server.`
567
+ `Use my speech tool to read this: "Welcome"`
568
 
569
+ **Command Mode**: Use structured commands (server name must match connected server's friendly name).
570
+ `/mcp <server_name> <tool_name> {"param1": "value1"}`
571
+ Example: `/mcp kokoroTTS text_to_audio {"text": "Hello world", "speed": 1.0}`
572
+ """)
573
+
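+ # Illustrative sketch (not wired into the app): one way a Command Mode string such as
+ # /mcp kokoroTTS text_to_audio {"text": "Hello"} could be split into its parts. The
+ # actual parsing happens inside `respond`; this helper name and behavior are assumptions.
+ #
+ # import json
+ # def parse_mcp_command(cmd: str):
+ #     parts = cmd.strip().split(" ", 3)  # "/mcp", server, tool, JSON args
+ #     if len(parts) < 3 or parts[0] != "/mcp":
+ #         return None
+ #     server_name, tool_name = parts[1], parts[2]
+ #     args = json.loads(parts[3]) if len(parts) > 3 else {}
+ #     return server_name, tool_name, args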
574
+ # Chat history state
575
+ # The chatbot component itself manages history for display.
576
+ # The `respond` function receives this display history and reconstructs API history.
577
+
578
+ def filter_models_ui_update(search_term, current_selection):
579
+ print(f"Filtering models with search term: {search_term}")
580
+ filtered = [m for m in models_list if search_term.lower() in m.lower()]
581
+ if not filtered: # If search yields no results, show all models
582
+ filtered = models_list
583
+ print(f"Filtered models: {filtered}")
584
+ return gr.Radio(choices=filtered, label="Select a Featured Model", value=current_selection if current_selection in filtered else (filtered[0] if filtered else None))
585
+
586
+ def set_custom_model_from_radio_ui_update(selected_featured_model):
587
+ print(f"Featured model selected: {selected_featured_model}")
588
+ return selected_featured_model # This updates the custom_model_box
589
+
590
+ def connect_mcp_server_ui_update(url, name_optional, currently_selected):
591
+ actual_name, status_msg = connect_to_mcp_server(url, name_optional)
592
+ updated_server_choices = list(mcp_connections.keys())
593
+ # Keep existing selection if possible
594
+ current_selection = currently_selected or []
595
+ valid_selection = [s for s in current_selection if s in updated_server_choices]
596
+ if actual_name and actual_name not in valid_selection: # Auto-select newly connected server
597
+ valid_selection.append(actual_name)
598
+
599
+ return status_msg, gr.Dropdown(choices=updated_server_choices, value=valid_selection, label="Active MCP Servers for Chat")
600
+
601
+ # This function processes the user's multimodal input and adds it to the chatbot history.
602
+ # It prepares the history entry and the API-formatted message that call_bot_responder consumes.
603
+ def handle_user_input(multimodal_input, history_list: list):
604
+ text_content = multimodal_input.get("text", "").strip()
605
+ files = multimodal_input.get("files", [])
606
+
607
+ # This will be the entry for the user's turn in the history
608
+ user_turn_for_api = []
609
+ user_turn_for_display = ""
610
 
611
+ if text_content:
612
+ user_turn_for_api.append({"type": "text", "text": text_content})
613
+ user_turn_for_display = text_content
614
+
615
+ if files:
616
+ display_files_md = ""
617
+ for file_path in files:
618
+ if file_path and isinstance(file_path, str): # Gradio provides temp path
619
+ encoded_img = encode_image(file_path) # For API
620
+ if encoded_img:
621
+ user_turn_for_api.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded_img}"}})
622
+ # For display, Gradio handles showing the image from MultimodalTextbox output
623
+ # We'll just make a note in the display string
624
+ display_files_md += f"\n<img src='file={file_path}' style='max-height:150px; display:block;' alt='uploaded image'>" # Gradio chatbots can render this HTML via the file= route (behavior may vary across Gradio versions)
625
+
626
+ if user_turn_for_display:
627
+ user_turn_for_display += display_files_md
628
+ else:
629
+ user_turn_for_display = display_files_md if display_files_md else "Image(s) uploaded"
630
+
631
+
632
+ if not user_turn_for_display and not user_turn_for_api: # Empty input
633
+ return history_list, None # Nothing to send; clear the pending API message so the bot step is skipped
634
 
635
+ # The chatbot display history stores [user_display_content, assistant_text]; the current
+ # turn is appended below as [user_turn_for_display, None] and the assistant slot is filled
+ # in later by call_bot_responder. The API-formatted user turn is returned separately so it
+ # can be handed to `respond` for the current message.
+
643
+ history_list.append([user_turn_for_display, None])
644
+ return history_list, user_turn_for_api # Return updated history and the API formatted current message
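+ # Illustrative shapes of the two return values for a text + single-image submission
+ # (the path and text are made-up examples; the base64 payload is truncated):
+ # history entry appended: ["What is in this photo?\n<img src='file=/tmp/photo.jpg' ...>", None]
+ # user_turn_for_api: [{"type": "text", "text": "What is in this photo?"},
+ #                     {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}}]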
 
 
 
 
 
 
 
645
 
 
 
646
 
647
+ # The bot function that calls the `respond` generator and streams its output into the chat display
648
+ def call_bot_responder(history_list_for_display, current_user_api_content, sys_msg, max_tok, temp, top_p_val, freq_pen, seed_val, prov, api_key_val, cust_model, _search, sel_model, mcp_on, active_servs, mcp_inter_mode):
649
+ if not current_user_api_content: # Nothing new from the user (e.g. an empty submission)
650
+ print("Bot called with no new user message, skipping.")
651
+ yield history_list_for_display # No change
652
+ return
653
 
654
+ # Build a simplified history for `respond` from the display history. The chatbot stores
+ # [user_display_content, assistant_text], which differs from the API format, so only the
+ # text of past turns is forwarded; the current turn's full API-formatted content travels
+ # separately in `current_user_api_content`. A more robust design would keep a dedicated,
+ # API-formatted history state alongside the display history.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
666
 
667
+ # `respond` expects the current turn's text as `message` and any new image uploads as
+ # `image_files` (file paths). Only the text is extracted from `current_user_api_content`
+ # here; see the note below on why images are not converted back to paths.
+ current_message_text = ""
672
+
673
+ if isinstance(current_user_api_content, list): # Multimodal
674
+ for part in current_user_api_content:
675
+ if part["type"] == "text":
676
+ current_message_text = part["text"]
677
+ elif part["type"] == "image_url":
678
+ # Base64-encoded image content cannot be converted back into file paths here, and
+ # `respond`'s `image_files` parameter expects paths for new uploads, so encoded images
+ # stay inside `current_user_api_content` and `image_files` is passed as None below.
+ # How current images actually reach `respond` still needs review.
691
+ pass # Images are part of `current_user_api_content` which is passed to `messages_for_api`
692
+ elif isinstance(current_user_api_content, str): # Text only
693
+ current_message_text = current_user_api_content
694
+
695
+ # Simplified history for `respond` (text from display)
696
+ # `respond` will reconstruct its own API history.
697
+ simplified_past_history = []
698
+ if len(history_list_for_display) > 1: # Exclude current turn
699
+ for user_disp, assistant_text in history_list_for_display[:-1]:
700
+ # Extract text from user_disp for simplified history
701
+ user_text_for_hist = user_disp
702
+ if isinstance(user_disp, str) and "<img src" in user_disp : # Basic check if it was image display
703
+ # Try to find text part if any, otherwise empty
704
+ lines = user_disp.splitlines()
705
+ text_lines = [line for line in lines if not line.strip().startswith("<img")]
706
+ user_text_for_hist = "\n".join(text_lines).strip() if text_lines else ""
707
+
708
+ simplified_past_history.append([user_text_for_hist, assistant_text])
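+ # For example, a past image turn displayed as "Describe this\n<img src='file=/tmp/a.jpg' ...>"
+ # (path illustrative) is reduced to ["Describe this", assistant_text] before being handed to `respond`.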
709
+
710
+ # The current turn's text has already been extracted above into `current_message_text`.
730
+
731
+
732
+ bot_response_stream = respond(
733
+ message=current_message_text, # Current turn's text
734
+ image_files=None, # Assume images are handled by messages_for_api construction in respond
735
+ history=simplified_past_history, # Past turns
736
+ system_message=sys_msg,
737
+ max_tokens=max_tok,
738
+ temperature=temp,
739
+ top_p=top_p_val,
740
+ frequency_penalty=freq_pen,
741
+ seed=seed_val,
742
+ provider=prov,
743
+ custom_api_key=api_key_val,
744
+ custom_model=cust_model,
745
+ model_search_term="", # Not directly used by respond
746
+ selected_model=sel_model,
747
+ mcp_enabled=mcp_on,
748
+ active_mcp_servers=active_servs,
749
+ mcp_interaction_mode=mcp_inter_mode
750
+ )
751
+
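+ # Stream the reply: each chunk replaces the assistant slot of the last history entry,
+ # which assumes `respond` yields the accumulated text so far rather than incremental deltas.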
752
+ for response_chunk in bot_response_stream:
753
+ history_list_for_display[-1][1] = response_chunk
754
+ yield history_list_for_display
755
 
 
 
 
 
 
 
756
 
757
+ # This state will hold the API-formatted content of the current user message
758
+ current_api_message_state = gr.State(None)
759
 
760
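+ # Submission flow: (1) handle_user_input appends the user's turn to the chat display and
+ # stores the API-formatted message in current_api_message_state, (2) call_bot_responder
+ # streams the assistant reply into the last history entry, (3) the input box is cleared.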
+ msg.submit(
761
+ handle_user_input,
762
+ [msg, chatbot], # chatbot here is the history_list
763
+ [chatbot, current_api_message_state] # Update history display and current_api_message_state
764
+ ).then(
765
+ call_bot_responder,
766
+ [chatbot, current_api_message_state, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
767
+ frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
768
+ model_search_box, featured_model_radio, mcp_enabled_checkbox, active_mcp_servers, mcp_mode],
769
+ [chatbot] # Update chatbot display with streaming response
770
+ ).then(
771
+ lambda: gr.MultimodalTextbox(value={"text": "", "files": []}), # Clear MultimodalTextbox
772
+ None,
773
+ [msg]
774
+ )
775
+
776
+ mcp_connect_button.click(
777
+ connect_mcp_server_ui_update,
778
+ [mcp_server_url, mcp_server_name, active_mcp_servers],
779
+ [mcp_status, active_mcp_servers]
780
+ )
781
+
782
+ model_search_box.change(fn=filter_models_ui_update, inputs=[model_search_box, featured_model_radio], outputs=featured_model_radio)
783
+ featured_model_radio.change(fn=set_custom_model_from_radio_ui_update, inputs=featured_model_radio, outputs=custom_model_box)
784
+
785
+ def validate_provider_ui_update(api_key, current_provider):
786
+ if not api_key.strip() and current_provider != "hf-inference":
787
+ gr.Info("No API key provided. Defaulting to 'hf-inference' provider.")
788
+ return gr.Radio(value="hf-inference") # Update provider_radio
789
+ return gr.Radio(value=current_provider) # Keep the currently selected provider
790
 
791
+ byok_textbox.change(fn=validate_provider_ui_update, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
792
+ provider_radio.change(fn=validate_provider_ui_update, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
793
 
794
+ print("Gradio interface initialized.")
795
 
796
  if __name__ == "__main__":
797
+ print("Launching the demo application.")
798
+ demo.queue().launch(show_api=False, debug=False) # Not launched with mcp_server=True: this app acts as an MCP client, not a server