Nymbo committed on
Commit 6f66243 · verified · 1 Parent(s): 13fb3b8

Update app.py

Files changed (1)
  1. app.py +516 -730
app.py CHANGED
@@ -5,40 +5,47 @@ import json
5
  import base64
6
  from PIL import Image
7
  import io
8
- import requests
9
- from smolagents.mcp_client import MCPClient
10
 
11
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
12
- print("Access token loaded.")
 
 
 
13
 
14
  # Function to encode image to base64
15
- def encode_image(image_path):
16
- if not image_path:
17
- print("No image path provided")
18
  return None
19
 
20
  try:
21
- print(f"Encoding image from path: {image_path}")
22
-
23
- # If it's already a PIL Image
24
- if isinstance(image_path, Image.Image):
25
- image = image_path
 
 
 
 
26
  else:
27
- # Try to open the image file
28
- image = Image.open(image_path)
29
 
30
- # Convert to RGB if image has an alpha channel (RGBA)
31
  if image.mode == 'RGBA':
32
  image = image.convert('RGB')
33
 
34
- # Encode to base64
35
  buffered = io.BytesIO()
36
- image.save(buffered, format="JPEG")
37
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
38
- print("Image encoded successfully")
39
  return img_str
40
  except Exception as e:
41
  print(f"Error encoding image: {e}")
 
 
42
  return None
43
 
44
  # Dictionary to store active MCP connections
@@ -47,827 +54,606 @@ mcp_connections = {}
47
  def connect_to_mcp_server(server_url, server_name=None):
48
  """Connect to an MCP server and return available tools"""
49
  if not server_url:
50
- return None, "No server URL provided"
51
 
52
  try:
53
- # Create an MCP client and connect to the server
54
- client = MCPClient({"url": server_url})
55
- # Get available tools
56
- tools = client.get_tools()
57
 
58
- # Store the connection for later use
59
- name = server_name or f"Server_{len(mcp_connections)}"
60
  mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}
61
 
62
- return name, f"Successfully connected to {name} with {len(tools)} available tools"
 
63
  except Exception as e:
64
- print(f"Error connecting to MCP server: {e}")
65
- return None, f"Error connecting to MCP server: {str(e)}"
 
 
66
 
67
  def list_mcp_tools(server_name):
68
  """List available tools for a connected MCP server"""
69
  if server_name not in mcp_connections:
70
- return "Server not connected"
71
 
72
  tools = mcp_connections[server_name]["tools"]
73
  tool_info = []
74
  for tool in tools:
75
- tool_info.append(f"- {tool.name}: {tool.description}")
76
 
77
  if not tool_info:
78
- return "No tools available for this server"
79
 
80
  return "\n".join(tool_info)
81
 
82
  def call_mcp_tool(server_name, tool_name, **kwargs):
83
- """Call a specific tool from an MCP server"""
84
  if server_name not in mcp_connections:
85
- return f"Server '{server_name}' not connected"
86
-
87
- client = mcp_connections[server_name]["client"]
88
- tools = mcp_connections[server_name]["tools"]
89
 
90
- # Find the requested tool
91
- tool = next((t for t in tools if t.name == tool_name), None)
92
- if not tool:
93
- return f"Tool '{tool_name}' not found on server '{server_name}'"
94
 
95
  try:
96
- # Call the tool with provided arguments
97
- result = client.call_tool(tool_name, kwargs)
98
- return result
 
99
  except Exception as e:
100
- print(f"Error calling MCP tool: {e}")
101
- return f"Error calling MCP tool: {str(e)}"
 
 
102
 
103
- def analyze_message_for_tool_call(message, active_mcp_servers, client, model_to_use, system_message):
104
  """Analyze a message to determine if an MCP tool should be called"""
105
- # Skip analysis if message is empty
106
- if not message or not message.strip():
107
  return None, None
108
 
109
- # Get information about available tools
110
- tool_info = []
111
- for server_name in active_mcp_servers:
112
- if server_name in mcp_connections:
113
- server_tools = mcp_connections[server_name]["tools"]
114
  for tool in server_tools:
115
- tool_info.append({
116
- "server_name": server_name,
117
- "tool_name": tool.name,
118
- "description": tool.description
119
- })
120
 
121
- if not tool_info:
 
122
  return None, None
123
 
124
- # Create a structured query for the LLM to analyze if a tool call is needed
125
- tools_desc = []
126
- for info in tool_info:
127
- tools_desc.append(f"{info['server_name']}.{info['tool_name']}: {info['description']}")
128
-
129
- tools_string = "\n".join(tools_desc)
130
 
131
- analysis_system_prompt = f"""You are an assistant that helps determine if a user message requires using an external tool.
132
- Available tools:
133
- {tools_string}
134
-
135
- Your job is to:
136
- 1. Analyze the user's message
137
- 2. Determine if they're asking to use one of the tools
138
- 3. If yes, respond with a JSON object with the server_name, tool_name, and parameters
139
- 4. If no, respond with "NO_TOOL_NEEDED"
140
-
141
- Example 1:
142
- User: "Please turn this text into speech: Hello world"
143
- Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio", "parameters": {{"text": "Hello world", "speed": 1.0}}}}
144
-
145
- Example 2:
146
- User: "What is the capital of France?"
147
- Response: NO_TOOL_NEEDED"""
148
 
149
  try:
150
- # Call the LLM to analyze the message
151
- response = client.chat_completion(
152
- model=model_to_use,
153
  messages=[
154
- {"role": "system", "content": analysis_system_prompt},
155
- {"role": "user", "content": message}
156
  ],
157
- temperature=0.2, # Low temperature for more deterministic responses
158
- max_tokens=300
 
159
  )
160
 
161
- analysis = response.choices[0].message.content
162
- print(f"Tool analysis: {analysis}")
163
 
164
- if "NO_TOOL_NEEDED" in analysis:
 
165
  return None, None
166
 
167
- # Try to extract JSON from the response
168
- json_start = analysis.find("{")
169
- json_end = analysis.rfind("}") + 1
170
 
171
- if json_start < 0 or json_end <= 0:
 
172
  return None, None
173
 
174
- json_str = analysis[json_start:json_end]
175
  try:
176
- tool_call = json.loads(json_str)
177
- return tool_call.get("server_name"), {
178
- "tool_name": tool_call.get("tool_name"),
179
- "parameters": tool_call.get("parameters", {})
180
- }
181
- except json.JSONDecodeError:
182
- print(f"Failed to parse tool call JSON: {json_str}")
 
 
 
 
 
183
  return None, None
184
 
185
  except Exception as e:
186
- print(f"Error analyzing message for tool calls: {str(e)}")
 
 
187
  return None, None
188
 
189
  def respond(
190
- message,
191
- image_files,
192
- history: list[tuple[str, str]],
193
- system_message,
194
- max_tokens,
195
- temperature,
196
- top_p,
197
- frequency_penalty,
198
- seed,
199
- provider,
200
- custom_api_key,
201
- custom_model,
202
- model_search_term,
203
- selected_model,
204
- mcp_enabled=False,
205
- active_mcp_servers=None,
206
- mcp_interaction_mode="Natural Language"
207
  ):
208
- print(f"Received message: {message}")
209
- print(f"Received {len(image_files) if image_files else 0} images")
210
- print(f"History: {history}")
211
- print(f"System message: {system_message}")
212
- print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
213
- print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
214
- print(f"Selected provider: {provider}")
215
- print(f"Custom API Key provided: {bool(custom_api_key.strip())}")
216
- print(f"Selected model (custom_model): {custom_model}")
217
- print(f"Model search term: {model_search_term}")
218
- print(f"Selected model from radio: {selected_model}")
219
- print(f"MCP enabled: {mcp_enabled}")
220
- print(f"Active MCP servers: {active_mcp_servers}")
221
- print(f"MCP interaction mode: {mcp_interaction_mode}")
222
-
223
- # Determine which token to use
224
- token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
225
 
226
- if custom_api_key.strip() != "":
227
- print("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
228
- else:
229
- print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
230
 
231
- # Initialize the Inference Client with the provider and appropriate token
232
- client = InferenceClient(token=token_to_use, provider=provider)
233
- print(f"Hugging Face Inference Client initialized with {provider} provider.")
234
-
235
- # Convert seed to None if -1 (meaning random)
236
- if seed == -1:
237
- seed = None
238
-
239
- # Determine which model to use
240
- model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
241
- print(f"Model selected for inference: {model_to_use}")
242
-
243
- # Process MCP commands in command mode
244
- if mcp_enabled and message:
245
- if message.startswith("/mcp"): # Always handle explicit commands
246
- # Handle MCP command
247
- command_parts = message.split(" ", 3)
248
  if len(command_parts) < 3:
249
- return "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments]"
 
250
 
251
- _, server_name, tool_name = command_parts[:3]
252
- args_json = "{}" if len(command_parts) < 4 else command_parts[3]
 
 
253
 
254
  try:
255
- args_dict = json.loads(args_json)
256
- result = call_mcp_tool(server_name, tool_name, **args_dict)
257
- if isinstance(result, dict):
258
- return json.dumps(result, indent=2)
259
- return str(result)
260
  except json.JSONDecodeError:
261
- return f"Invalid JSON arguments: {args_json}"
262
- except Exception as e:
263
- return f"Error executing MCP command: {str(e)}"
264
- elif mcp_interaction_mode == "Natural Language" and active_mcp_servers:
265
- # Use natural language processing to detect tool calls
266
- server_name, tool_info = analyze_message_for_tool_call(
267
- message,
268
- active_mcp_servers,
269
- client,
270
- model_to_use,
271
- system_message
 
 
 
 
272
  )
273
 
274
- if server_name and tool_info:
275
- try:
276
- # Call the detected tool
277
- print(f"Calling tool via natural language: {server_name}.{tool_info['tool_name']} with parameters: {tool_info['parameters']}")
278
- result = call_mcp_tool(server_name, tool_info['tool_name'], **tool_info['parameters'])
279
-
280
- # Format the response to include what was done
281
- if isinstance(result, dict):
282
- result_str = json.dumps(result, indent=2)
283
- else:
284
- result_str = str(result)
285
-
286
- return f"I used the {tool_info['tool_name']} tool from {server_name} with your request.\n\nResult:\n{result_str}"
287
- except Exception as e:
288
- print(f"Error executing MCP tool via natural language: {str(e)}")
289
- # Continue with normal response if tool call fails
290
-
291
- # Create multimodal content if images are present
292
- if image_files and len(image_files) > 0:
293
- # Process the user message to include images
294
- user_content = []
295
-
296
- # Add text part if there is any
297
- if message and message.strip():
298
- user_content.append({
299
- "type": "text",
300
- "text": message
301
- })
302
-
303
- # Add image parts
304
- for img in image_files:
305
- if img is not None:
306
- # Get raw image data from path
307
- try:
308
- encoded_image = encode_image(img)
309
- if encoded_image:
310
- user_content.append({
311
- "type": "image_url",
312
- "image_url": {
313
- "url": f"data:image/jpeg;base64,{encoded_image}"
314
- }
315
- })
316
- except Exception as e:
317
- print(f"Error encoding image: {e}")
318
- else:
319
- # Text-only message
320
- user_content = message
321
-
322
- # Add information about available MCP tools to the system message if MCP is enabled
323
- augmented_system_message = system_message
324
- if mcp_enabled and active_mcp_servers:
325
- tool_info = []
326
- for server_name in active_mcp_servers:
327
- if server_name in mcp_connections:
328
- server_tools = list_mcp_tools(server_name).split("\n")
329
- tool_info.extend([f"{server_name}: {tool}" for tool in server_tools])
330
-
331
- if tool_info:
332
- mcp_tools_description = "\n".join(tool_info)
333
 
334
- if mcp_interaction_mode == "Command Mode":
335
- augmented_system_message += f"\n\nYou have access to the following MCP tools:\n{mcp_tools_description}\n\nTo use these tools, the user can type a command in the format: /mcp <server_name> <tool_name> <arguments_json>"
336
- else:
337
- augmented_system_message += f"\n\nYou have access to the following MCP tools:\n{mcp_tools_description}\n\nThe user can use these tools by describing what they want in natural language, and the system will automatically detect when to use a tool based on their request."
338
 
339
- # Prepare messages in the format expected by the API
340
- messages = [{"role": "system", "content": augmented_system_message}]
341
- print("Initial messages array constructed.")
342
-
343
- # Add conversation history to the context
344
- for val in history:
345
- user_part = val[0]
346
- assistant_part = val[1]
347
- if user_part:
348
- # Handle both text-only and multimodal messages in history
349
- if isinstance(user_part, tuple) and len(user_part) == 2:
350
- # This is a multimodal message with text and images
351
- history_content = []
352
- if user_part[0]: # Text
353
- history_content.append({
354
- "type": "text",
355
- "text": user_part[0]
356
  })
357
-
358
- for img in user_part[1]: # Images
359
- if img:
360
- try:
361
- encoded_img = encode_image(img)
362
- if encoded_img:
363
- history_content.append({
364
- "type": "image_url",
365
- "image_url": {
366
- "url": f"data:image/jpeg;base64,{encoded_img}"
367
- }
368
- })
369
- except Exception as e:
370
- print(f"Error encoding history image: {e}")
371
-
372
- messages.append({"role": "user", "content": history_content})
373
- else:
374
- # Regular text message
375
- messages.append({"role": "user", "content": user_part})
376
- print(f"Added user message to context (type: {type(user_part)})")
377
 
378
- if assistant_part:
379
- messages.append({"role": "assistant", "content": assistant_part})
380
- print(f"Added assistant message to context: {assistant_part}")
381
-
382
- # Append the latest user message
383
- messages.append({"role": "user", "content": user_content})
384
- print(f"Latest user message appended (content type: {type(user_content)})")
385
-
386
- # Determine which model to use, prioritizing custom_model if provided
387
- model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
388
- print(f"Model selected for inference: {model_to_use}")
389
-
390
- # Start with an empty string to build the response as tokens stream in
391
- response = ""
392
- print(f"Sending request to {provider} provider.")
393
-
394
- # Prepare parameters for the chat completion request
395
- parameters = {
396
- "max_tokens": max_tokens,
397
- "temperature": temperature,
398
- "top_p": top_p,
399
- "frequency_penalty": frequency_penalty,
400
  }
401
-
402
- if seed is not None:
403
- parameters["seed"] = seed
404
 
405
- # Use the InferenceClient for making the request
 
406
  try:
407
- # Create a generator for the streaming response
408
- stream = client.chat_completion(
409
- model=model_to_use,
410
- messages=messages,
411
  stream=True,
412
- **parameters
413
  )
414
 
415
- print("Received tokens: ", end="", flush=True)
416
-
417
- # Process the streaming response
418
- for chunk in stream:
419
  if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
420
- # Extract the content from the response
421
- if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
422
- token_text = chunk.choices[0].delta.content
423
- if token_text:
424
- print(token_text, end="", flush=True)
425
- response += token_text
426
- yield response
427
-
428
- print()
429
- except Exception as e:
430
- print(f"Error during inference: {e}")
431
- response += f"\nError: {str(e)}"
432
- yield response
 
 
 
433
 
434
- print("Completed response generation.")
435
 
436
  # GRADIO UI
437
- with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
438
- # Create the chatbot component
439
  chatbot = gr.Chatbot(
 
440
  height=600,
441
  show_copy_button=True,
442
- placeholder="Select a model and begin chatting. Now supports multiple inference providers, multimodal inputs, and MCP tools",
443
- layout="panel"
444
- )
445
- print("Chatbot interface created.")
446
-
447
- # Multimodal textbox for messages (combines text and file uploads)
448
- msg = gr.MultimodalTextbox(
449
- placeholder="Type a message or upload images...",
450
- show_label=False,
451
- container=False,
452
- scale=12,
453
- file_types=["image"],
454
- file_count="multiple",
455
- sources=["upload"]
456
  )
457
 
458
- # Create accordion for settings
459
- with gr.Accordion("Settings", open=False):
460
- # System message
461
- system_message_box = gr.Textbox(
462
- value="You are a helpful AI assistant that can understand images and text.",
463
- placeholder="You are a helpful assistant.",
464
- label="System Prompt"
465
  )
466
 
467
- # Generation parameters
468
  with gr.Row():
469
- with gr.Column():
470
- max_tokens_slider = gr.Slider(
471
- minimum=1,
472
- maximum=4096,
473
- value=512,
474
- step=1,
475
- label="Max tokens"
476
- )
477
-
478
- temperature_slider = gr.Slider(
479
- minimum=0.1,
480
- maximum=4.0,
481
- value=0.7,
482
- step=0.1,
483
- label="Temperature"
484
- )
485
-
486
- top_p_slider = gr.Slider(
487
- minimum=0.1,
488
- maximum=1.0,
489
- value=0.95,
490
- step=0.05,
491
- label="Top-P"
492
- )
493
-
494
- with gr.Column():
495
- frequency_penalty_slider = gr.Slider(
496
- minimum=-2.0,
497
- maximum=2.0,
498
- value=0.0,
499
- step=0.1,
500
- label="Frequency Penalty"
501
- )
502
-
503
- seed_slider = gr.Slider(
504
- minimum=-1,
505
- maximum=65535,
506
- value=-1,
507
- step=1,
508
- label="Seed (-1 for random)"
509
- )
510
-
511
- # Provider selection
512
- providers_list = [
513
- "hf-inference", # Default Hugging Face Inference
514
- "cerebras", # Cerebras provider
515
- "together", # Together AI
516
- "sambanova", # SambaNova
517
- "novita", # Novita AI
518
- "cohere", # Cohere
519
- "fireworks-ai", # Fireworks AI
520
- "hyperbolic", # Hyperbolic
521
- "nebius", # Nebius
522
  ]
 
523
 
524
- provider_radio = gr.Radio(
525
- choices=providers_list,
526
- value="hf-inference",
527
- label="Inference Provider",
528
- )
529
 
530
- # New BYOK textbox
531
- byok_textbox = gr.Textbox(
532
- value="",
533
- label="BYOK (Bring Your Own Key)",
534
- info="Enter a custom Hugging Face API key here. When empty, only 'hf-inference' provider can be used.",
535
- placeholder="Enter your Hugging Face API token",
536
- type="password" # Hide the API key for security
537
- )
538
 
539
- # Custom model box
540
- custom_model_box = gr.Textbox(
541
- value="",
542
- label="Custom Model",
543
- info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
544
- placeholder="meta-llama/Llama-3.3-70B-Instruct"
545
- )
546
 
547
- # Model search
548
- model_search_box = gr.Textbox(
549
- label="Filter Models",
550
- placeholder="Search for a featured model...",
551
- lines=1
552
- )
553
-
554
- # Featured models list
555
- # Updated to include multimodal models
556
- models_list = [
557
- "meta-llama/Llama-3.2-11B-Vision-Instruct",
558
- "meta-llama/Llama-3.3-70B-Instruct",
559
- "meta-llama/Llama-3.1-70B-Instruct",
560
- "meta-llama/Llama-3.0-70B-Instruct",
561
- "meta-llama/Llama-3.2-3B-Instruct",
562
- "meta-llama/Llama-3.2-1B-Instruct",
563
- "meta-llama/Llama-3.1-8B-Instruct",
564
- "NousResearch/Hermes-3-Llama-3.1-8B",
565
- "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
566
  "mistralai/Mistral-Nemo-Instruct-2407",
567
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
568
- "mistralai/Mistral-7B-Instruct-v0.3",
569
- "mistralai/Mistral-7B-Instruct-v0.2",
570
- "Qwen/Qwen3-235B-A22B",
571
- "Qwen/Qwen3-32B",
572
- "Qwen/Qwen2.5-72B-Instruct",
573
- "Qwen/Qwen2.5-3B-Instruct",
574
- "Qwen/Qwen2.5-0.5B-Instruct",
575
- "Qwen/QwQ-32B",
576
- "Qwen/Qwen2.5-Coder-32B-Instruct",
577
- "microsoft/Phi-3.5-mini-instruct",
578
- "microsoft/Phi-3-mini-128k-instruct",
579
- "microsoft/Phi-3-mini-4k-instruct",
580
  ]
581
-
582
- featured_model_radio = gr.Radio(
583
- label="Select a model below",
584
- choices=models_list,
585
- value="meta-llama/Llama-3.2-11B-Vision-Instruct", # Default to a multimodal model
586
- interactive=True
587
- )
588
 
589
- gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")
590
-
591
- # Create accordion for MCP settings
592
- with gr.Accordion("MCP Settings", open=False):
593
- mcp_enabled_checkbox = gr.Checkbox(
594
- label="Enable MCP Support",
595
- value=False,
596
- info="Enable Model Context Protocol support to connect to external tools and services"
597
- )
598
 
599
  with gr.Row():
600
- mcp_server_url = gr.Textbox(
601
- label="MCP Server URL",
602
- placeholder="https://example-mcp-server.hf.space/gradio_api/mcp/sse",
603
- info="URL of the MCP server to connect to"
604
- )
605
-
606
- mcp_server_name = gr.Textbox(
607
- label="Server Name",
608
- placeholder="Optional name for this server",
609
- info="A friendly name to identify this server"
610
- )
611
-
612
- mcp_connect_button = gr.Button("Connect to MCP Server")
613
 
614
- mcp_status = gr.Textbox(
615
- label="MCP Connection Status",
616
- placeholder="No MCP servers connected",
617
- interactive=False
618
- )
619
 
620
- active_mcp_servers = gr.Dropdown(
621
- label="Active MCP Servers",
622
- choices=[],
623
- multiselect=True,
624
- info="Select which MCP servers to use in chat"
625
  )
626
 
627
- mcp_mode = gr.Radio(
628
- label="MCP Interaction Mode",
629
- choices=["Natural Language", "Command Mode"],
630
- value="Natural Language",
631
- info="Choose how to interact with MCP tools"
632
  )
633
-
634
- gr.Markdown("""
635
- ### MCP Interaction Modes
636
-
637
- **Natural Language Mode**: Simply describe what you want in plain English. Examples:
638
- ```
639
- Please convert the text "Hello world" to speech
640
- Can you read this text aloud: "Welcome to MCP integration"
641
- ```
642
-
643
- **Command Mode**: Use structured commands (for advanced users)
644
- ```
645
- /mcp <server_name> <tool_name> {"param1": "value1", "param2": "value2"}
646
- ```
647
-
648
- Example:
649
- ```
650
- /mcp kokoroTTS text_to_audio {"text": "Hello world", "speed": 1.0}
651
- ```
652
- """)
653
-
654
- # Chat history state
655
- chat_history = gr.State([])
656
-
657
- # Function to filter models
658
- def filter_models(search_term):
659
- print(f"Filtering models with search term: {search_term}")
660
- filtered = [m for m in models_list if search_term.lower() in m.lower()]
661
- print(f"Filtered models: {filtered}")
662
- return gr.update(choices=filtered)
663
-
664
- # Function to set custom model from radio
665
- def set_custom_model_from_radio(selected):
666
- print(f"Featured model selected: {selected}")
667
- return selected
668
-
669
- # Function to connect to MCP server
670
- def connect_mcp_server(url, name):
671
- server_name, status = connect_to_mcp_server(url, name)
672
-
673
- # Update the active servers dropdown
674
- servers = list(mcp_connections.keys())
675
-
676
- # Return the status message and updated server list
677
- return status, gr.update(choices=servers)
678
 
679
- # Function for the chat interface
680
- def user(user_message, history):
681
- # Debug logging for troubleshooting
682
- print(f"User message received: {user_message}")
683
-
684
- # Skip if message is empty (no text and no files)
685
- if not user_message or (not user_message.get("text") and not user_message.get("files")):
686
- print("Empty message, skipping")
687
- return history
688
-
689
- # Prepare multimodal message format
690
- text_content = user_message.get("text", "").strip()
691
- files = user_message.get("files", [])
692
-
693
- print(f"Text content: {text_content}")
694
- print(f"Files: {files}")
695
-
696
- # If both text and files are empty, skip
697
- if not text_content and not files:
698
- print("No content to display")
699
- return history
700
-
701
- # Add message with images to history
702
- if files and len(files) > 0:
703
- # Add text message first if it exists
704
- if text_content:
705
- # Add a separate text message
706
- print(f"Adding text message: {text_content}")
707
- history.append([text_content, None])
708
-
709
- # Then add each image file separately
710
- for file_path in files:
711
- if file_path and isinstance(file_path, str):
712
- print(f"Adding image: {file_path}")
713
- # Add image as a separate message with no text
714
- history.append([f"![Image]({file_path})", None])
715
-
716
- return history
717
- else:
718
- # For text-only messages
719
- print(f"Adding text-only message: {text_content}")
720
- history.append([text_content, None])
721
- return history
722
 
723
- # Define bot response function
724
- def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model, mcp_enabled, selected_servers, mcp_interaction_mode):
725
- # Check if history is valid
726
- if not history or len(history) == 0:
727
- print("No history to process")
728
- return history
729
-
730
- # Get the most recent message and detect if it's an image
731
- user_message = history[-1][0]
732
- print(f"Processing user message: {user_message}")
733
-
734
- is_image = False
735
- image_path = None
736
- text_content = user_message
737
-
738
- # Check if this is an image message (marked with ![Image])
739
- if isinstance(user_message, str) and user_message.startswith("![Image]("):
740
- is_image = True
741
- # Extract image path from markdown format ![Image](path)
742
- image_path = user_message.replace("![Image](", "").replace(")", "")
743
- print(f"Image detected: {image_path}")
744
- text_content = "" # No text for image-only messages
745
-
746
- # Look back for text context if this is an image
747
- text_context = ""
748
- if is_image and len(history) > 1:
749
- # Use the previous message as context if it's text
750
- prev_message = history[-2][0]
751
- if isinstance(prev_message, str) and not prev_message.startswith("![Image]("):
752
- text_context = prev_message
753
- print(f"Using text context from previous message: {text_context}")
754
-
755
- # Process message through respond function
756
- history[-1][1] = ""
757
 
758
- # Use either the image or text for the API
759
- if is_image:
760
- # For image messages
761
- for response in respond(
762
- text_context, # Text context from previous message if any
763
- [image_path], # Current image
764
- history[:-1], # Previous history
765
- system_msg,
766
- max_tokens,
767
- temperature,
768
- top_p,
769
- freq_penalty,
770
- seed,
771
- provider,
772
- api_key,
773
- custom_model,
774
- search_term,
775
- selected_model,
776
- mcp_enabled,
777
- selected_servers,
778
- mcp_interaction_mode
779
- ):
780
- history[-1][1] = response
781
- yield history
782
- else:
783
- # For text-only messages
784
- for response in respond(
785
- text_content, # Text message
786
- None, # No image
787
- history[:-1], # Previous history
788
- system_msg,
789
- max_tokens,
790
- temperature,
791
- top_p,
792
- freq_penalty,
793
- seed,
794
- provider,
795
- api_key,
796
- custom_model,
797
- search_term,
798
- selected_model,
799
- mcp_enabled,
800
- selected_servers,
801
- mcp_interaction_mode
802
- ):
803
- history[-1][1] = response
804
- yield history
805
-
806
- # Update function for provider validation based on BYOK
807
- def validate_provider(api_key, provider):
808
- if not api_key.strip() and provider != "hf-inference":
809
- return gr.update(value="hf-inference")
810
- return gr.update(value=provider)
811
-
812
- # Event handlers
813
- msg.submit(
814
- user,
815
- [msg, chatbot],
816
- [chatbot],
817
- queue=False
818
- ).then(
819
- bot,
820
- [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
821
- frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
822
- model_search_box, featured_model_radio, mcp_enabled_checkbox, active_mcp_servers, mcp_mode],
823
- [chatbot]
824
- ).then(
825
- lambda: {"text": "", "files": []}, # Clear inputs after submission
826
- None,
827
- [msg]
828
- )
829
-
830
- # Connect MCP connect button
831
- mcp_connect_button.click(
832
- connect_mcp_server,
833
- [mcp_server_url, mcp_server_name],
834
- [mcp_status, active_mcp_servers]
835
- )
836
 
837
- # Connect the model filter to update the radio choices
838
- model_search_box.change(
839
- fn=filter_models,
840
- inputs=model_search_box,
841
- outputs=featured_model_radio
842
  )
843
- print("Model search box change event linked.")
844
 
845
- # Connect the featured model radio to update the custom model box
846
- featured_model_radio.change(
847
- fn=set_custom_model_from_radio,
848
- inputs=featured_model_radio,
849
- outputs=custom_model_box
850
- )
851
- print("Featured model radio button change event linked.")
852
-
853
- # Connect the BYOK textbox to validate provider selection
854
- byok_textbox.change(
855
- fn=validate_provider,
856
- inputs=[byok_textbox, provider_radio],
857
- outputs=provider_radio
 
 
 
 
858
  )
859
- print("BYOK textbox change event linked.")
860
 
861
- # Also validate provider when the radio changes to ensure consistency
862
- provider_radio.change(
863
- fn=validate_provider,
864
- inputs=[byok_textbox, provider_radio],
865
- outputs=provider_radio
866
- )
867
- print("Provider radio button change event linked.")
868
 
869
- print("Gradio interface initialized.")
870
 
871
  if __name__ == "__main__":
872
- print("Launching the demo application.")
873
- demo.launch(show_api=True, mcp_server=False) # Not launching as MCP server as we're the client
 
5
  import base64
6
  from PIL import Image
7
  import io
8
+ import requests # Keep for potential future use, though not directly used in core logic now
9
+ from smolagents.mcp_client import MCPClient # Ensure this is correctly installed and importable
10
 
11
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
12
+ if ACCESS_TOKEN:
13
+ print("Access token loaded from HF_TOKEN environment variable.")
14
+ else:
15
+ print("Warning: HF_TOKEN environment variable not set. Some operations might fail.")
16
 
17
  # Function to encode image to base64
18
+ def encode_image(image_path_or_pil):
19
+ if not image_path_or_pil:
20
+ print("No image path or PIL Image provided")
21
  return None
22
 
23
  try:
24
+ if isinstance(image_path_or_pil, Image.Image):
25
+ image = image_path_or_pil
26
+ print(f"Encoding PIL Image object.")
27
+ elif isinstance(image_path_or_pil, str):
28
+ print(f"Encoding image from path: {image_path_or_pil}")
29
+ if not os.path.exists(image_path_or_pil):
30
+ print(f"Error: Image file not found at {image_path_or_pil}")
31
+ return None
32
+ image = Image.open(image_path_or_pil)
33
  else:
34
+ print(f"Error: Unsupported image input type: {type(image_path_or_pil)}")
35
+ return None
36
 
 
37
  if image.mode == 'RGBA':
38
  image = image.convert('RGB')
39
 
 
40
  buffered = io.BytesIO()
41
+ image.save(buffered, format="JPEG") # Or PNG if preferred, ensure consistency
42
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
43
+ print("Image encoded successfully to base64.")
44
  return img_str
45
  except Exception as e:
46
  print(f"Error encoding image: {e}")
47
+ import traceback
48
+ traceback.print_exc()
49
  return None
50
 
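A minimal usage sketch of the updated helper (illustrative only; the file name is a hypothetical example, not part of the commit):

```python
from PIL import Image

# Path input: returns a base64-encoded JPEG string, or None on failure.
b64 = encode_image("example.jpg")  # "example.jpg" is a placeholder path
if b64:
    data_uri = f"data:image/jpeg;base64,{b64}"

# A PIL.Image object is also accepted by the updated signature.
b64_from_pil = encode_image(Image.new("RGB", (64, 64)))
```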
51
  # Dictionary to store active MCP connections
 
54
  def connect_to_mcp_server(server_url, server_name=None):
55
  """Connect to an MCP server and return available tools"""
56
  if not server_url:
57
+ return None, "No server URL provided. Please enter a valid URL."
58
 
59
  try:
60
+ print(f"Attempting to connect to MCP server at URL: {server_url}")
61
+ client = MCPClient({"url": server_url}) # This might block or raise if connection fails
62
+ tools = client.get_tools() # This should also be a blocking call until tools are fetched
 
63
 
64
+ name = server_name.strip() if server_name and server_name.strip() else f"Server_{len(mcp_connections) + 1}"
 
65
  mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}
66
 
67
+ print(f"Successfully connected to MCP server: {name} with {len(tools)} tools.")
68
+ return name, f"Successfully connected to '{name}' ({server_url}). Found {len(tools)} tool(s)."
69
  except Exception as e:
70
+ print(f"Error connecting to MCP server at {server_url}: {e}")
71
+ import traceback
72
+ traceback.print_exc()
73
+ return None, f"Error connecting to MCP server '{server_url}': {str(e)}"
74
 
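A connection sketch, reusing the placeholder URL shown in the UI below; the server name is arbitrary and the tool count in the comment is only an example:

```python
# Illustrative only: registering an MCP server so its tools become callable later.
name, status = connect_to_mcp_server(
    "https://example-mcp-server.hf.space/gradio_api/mcp/sse",  # placeholder URL
    server_name="kokoroTTS",
)
print(status)                       # e.g. "Successfully connected to 'kokoroTTS' (...). Found 3 tool(s)."
print(list_mcp_tools("kokoroTTS"))  # markdown bullet list of tool names and descriptions
```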
75
  def list_mcp_tools(server_name):
76
  """List available tools for a connected MCP server"""
77
  if server_name not in mcp_connections:
78
+ return "Server not connected or name not found."
79
 
80
  tools = mcp_connections[server_name]["tools"]
81
  tool_info = []
82
  for tool in tools:
83
+ tool_info.append(f"- **{tool.name}**: {tool.description}")
84
 
85
  if not tool_info:
86
+ return "No tools available for this server."
87
 
88
  return "\n".join(tool_info)
89
 
90
  def call_mcp_tool(server_name, tool_name, **kwargs):
91
+ """Call a specific tool from an MCP server and process its result."""
92
  if server_name not in mcp_connections:
93
+ return {"type": "error", "message": f"Server '{server_name}' not connected."}
 
 
 
94
 
95
+ mcp_client_instance = mcp_connections[server_name]["client"]
 
 
 
96
 
97
  try:
98
+ print(f"Calling MCP tool: {server_name}.{tool_name} with args: {kwargs}")
99
+ # Assuming mcp_client_instance.call_tool returns an mcp.client.tool.ToolResult object
100
+ tool_result = mcp_client_instance.call_tool(tool_name, kwargs)
101
+
102
+ if tool_result and tool_result.content:
103
+ # Process multiple blocks if present, concatenating text or prioritizing audio
104
+ audio_block_found = None
105
+ text_parts = []
106
+ json_parts = []
107
+ other_parts = []
108
+
109
+ for block in tool_result.content:
110
+ if hasattr(block, 'uri') and isinstance(block.uri, str) and block.uri.startswith('data:audio/'):
111
+ audio_block_found = {
112
+ "type": "audio",
113
+ "data_uri": block.uri,
114
+ "name": getattr(block, 'name', 'audio_output.wav')
115
+ }
116
+ break # Prioritize first audio block
117
+ elif hasattr(block, 'text') and block.text is not None:
118
+ text_parts.append(str(block.text))
119
+ elif hasattr(block, 'json_data') and block.json_data is not None:
120
+ try:
121
+ json_parts.append(json.dumps(block.json_data, indent=2))
122
+ except TypeError:
123
+ json_parts.append(str(block.json_data)) # Fallback
124
+ else:
125
+ other_parts.append(str(block))
126
+
127
+ if audio_block_found:
128
+ print(f"MCP tool returned audio: {audio_block_found['name']}")
129
+ return audio_block_found
130
+ elif text_parts:
131
+ full_text = "\n".join(text_parts)
132
+ print(f"MCP tool returned text: {full_text[:100]}...")
133
+ return {"type": "text", "value": full_text}
134
+ elif json_parts:
135
+ full_json_str = "\n".join(json_parts)
136
+ print(f"MCP tool returned JSON string.")
137
+ return {"type": "json_string", "value": full_json_str} # Treat as string for display
138
+ elif other_parts:
139
+ print(f"MCP tool returned other content types.")
140
+ return {"type": "text", "value": "\n".join(other_parts)}
141
+ else:
142
+ print("MCP tool executed but returned no interpretable primary content blocks.")
143
+ return {"type": "text", "value": "Tool executed, but returned no standard content (audio/text/json)."}
144
+
145
+ print("MCP tool executed, but ToolResult or its content was empty.")
146
+ return {"type": "text", "value": "Tool executed, but returned no content."}
147
  except Exception as e:
148
+ print(f"Error calling MCP tool '{tool_name}' or processing its result: {e}")
149
+ import traceback
150
+ traceback.print_exc()
151
+ return {"type": "error", "message": f"Error during MCP tool '{tool_name}' execution: {str(e)}"}
152
 
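A sketch of how the structured result dicts returned above might be consumed; the server and tool names are the same placeholders used in the examples elsewhere in this file:

```python
# Illustrative only: branch on the result dict shape produced by call_mcp_tool.
result = call_mcp_tool("kokoroTTS", "text_to_audio", text="Hello world", speed=1.0)

if result.get("type") == "audio":
    print("Audio data URI:", result["data_uri"][:40], "...")
elif result.get("type") in ("text", "json_string"):
    print("Tool output:\n", result["value"])
elif result.get("type") == "error":
    print("Tool call failed:", result["message"])
```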
153
+ def analyze_message_for_tool_call(message, active_mcp_servers, llm_client, llm_model_to_use, base_system_message):
154
  """Analyze a message to determine if an MCP tool should be called"""
155
+ if not message or not message.strip() or not active_mcp_servers:
 
156
  return None, None
157
 
158
+ tool_info_for_llm = []
159
+ for server_name_iter in active_mcp_servers:
160
+ if server_name_iter in mcp_connections:
161
+ server_tools = mcp_connections[server_name_iter]["tools"]
 
162
  for tool in server_tools:
163
+ # Provide a concise description for the LLM
164
+ tool_info_for_llm.append(
165
+ f"- Server: '{server_name_iter}', Tool: '{tool.name}', Description: '{tool.description}'"
166
+ )
 
167
 
168
+ if not tool_info_for_llm:
169
+ print("No active MCP tools found for analysis.")
170
  return None, None
171
 
172
+ tools_string_for_llm = "\n".join(tool_info_for_llm)
 
 
 
 
 
173
 
174
+ # More robust system prompt for tool detection
175
+ analysis_system_prompt = f"""You are an expert assistant that determines if a user's request requires an external tool.
176
+ You have access to the following tools:
177
+ {tools_string_for_llm}
178
+
179
+ Based on the user's message, decide if any of these tools are appropriate.
180
+ If a tool is needed, respond ONLY with a JSON object containing:
181
+ "server_name": The name of the server providing the tool.
182
+ "tool_name": The name of the tool to be called.
183
+ "parameters": A dictionary of parameters for the tool, inferred from the user's message. Ensure parameter names match what the tool expects (often 'text', 'query', 'speed', etc.).
184
+
185
+ If NO tool is needed, respond ONLY with the exact string: NO_TOOL_NEEDED
186
+
187
+ Example 1 (TTS tool):
188
+ User: "Can you say 'hello world' for me at a slightly faster speed?"
189
+ Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio", "parameters": {{"text": "hello world", "speed": 1.2}}}}
190
+
191
+ Example 2 (File tool):
192
+ User: "Read the content of my_document.txt"
193
+ Response: {{"server_name": "FileSystemServer", "tool_name": "readFile", "parameters": {{"path": "my_document.txt"}}}}
194
+
195
+ Example 3 (No tool):
196
+ User: "What's the weather like today?" (Assuming no weather tool is listed)
197
+ Response: NO_TOOL_NEEDED
198
+
199
+ User's current message is: "{message}"
200
+ Now, provide your decision:"""
201
 
202
  try:
203
+ print(f"Sending tool analysis request to LLM model: {llm_model_to_use}")
204
+ response = llm_client.chat_completion(
205
+ model=llm_model_to_use,
206
  messages=[
207
+ # {"role": "system", "content": base_system_message}, # Optional: provide original system message for context
208
+ {"role": "user", "content": analysis_system_prompt} # The prompt itself is the user message here
209
  ],
210
+ temperature=0.1, # Low temperature for deterministic tool selection
211
+ max_tokens=300,
212
+ stop=["\n\n"] # Stop early if LLM adds extra verbiage
213
  )
214
 
215
+ analysis_text = response.choices[0].message.content.strip()
216
+ print(f"LLM tool analysis response: '{analysis_text}'")
217
 
218
+ if "NO_TOOL_NEEDED" in analysis_text or analysis_text == "NO_TOOL_NEEDED":
219
+ print("LLM determined no tool needed.")
220
  return None, None
221
 
222
+ # Try to extract JSON from the response (handle potential markdown code blocks)
223
+ if analysis_text.startswith("```json"):
224
+ analysis_text = analysis_text.replace("```json", "").replace("```", "").strip()
225
+ elif analysis_text.startswith("```"):
226
+ analysis_text = analysis_text.replace("```", "").strip()
227
+
228
+
229
+ json_start = analysis_text.find("{")
230
+ json_end = analysis_text.rfind("}") + 1
231
 
232
+ if json_start == -1 or json_end <= json_start:
233
+ print(f"Could not find valid JSON object in LLM response: '{analysis_text}'")
234
  return None, None
235
 
236
+ json_str = analysis_text[json_start:json_end]
237
  try:
238
+ tool_call_data = json.loads(json_str)
239
+ if "server_name" in tool_call_data and "tool_name" in tool_call_data:
240
+ print(f"LLM suggested tool call: {tool_call_data}")
241
+ return tool_call_data.get("server_name"), {
242
+ "tool_name": tool_call_data.get("tool_name"),
243
+ "parameters": tool_call_data.get("parameters", {})
244
+ }
245
+ else:
246
+ print(f"LLM response parsed as JSON but missing server_name or tool_name: {json_str}")
247
+ return None, None
248
+ except json.JSONDecodeError as e:
249
+ print(f"Failed to parse tool call JSON from LLM response: '{json_str}'. Error: {e}")
250
  return None, None
251
 
252
  except Exception as e:
253
+ print(f"Error during LLM analysis for tool calls: {str(e)}")
254
+ import traceback
255
+ traceback.print_exc()
256
  return None, None
257
 
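The helper returns either a (server_name, tool_info) pair or (None, None); a sketch of the expected shapes, reusing the kokoroTTS example from the prompt above and assuming that server is already connected:

```python
# Illustrative only: expected return values from analyze_message_for_tool_call.
client = InferenceClient(token=ACCESS_TOKEN, provider="hf-inference")  # same construction respond() uses
server, tool_info = analyze_message_for_tool_call(
    "Please turn this text into speech: Hello world",
    ["kokoroTTS"],                              # active MCP server names (must be in mcp_connections)
    client,
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "You are a helpful assistant.",
)
# Tool detected:  ("kokoroTTS", {"tool_name": "text_to_audio", "parameters": {"text": "Hello world", "speed": 1.0}})
# No tool needed: (None, None)
```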
258
  def respond(
259
+ message_text_input, # From user function, this is just the text part
260
+ message_files_input, # From user function, this is the list of file paths
261
+ history_tuples: list[tuple[tuple[str, list], str]], # History: list of ((user_text, [user_files]), assistant_response)
262
+ system_message_prompt,
263
+ max_tokens_val,
264
+ temperature_val,
265
+ top_p_val,
266
+ frequency_penalty_val,
267
+ seed_val,
268
+ provider_choice,
269
+ custom_api_key_val,
270
+ custom_model_id,
271
+ # model_search_term_val, # Not directly used in respond, but kept for signature consistency if UI passes it
272
+ selected_hf_model_id,
273
+ mcp_is_enabled,
274
+ active_mcp_server_names, # List of selected server names
275
+ mcp_interaction_mode_choice
276
  ):
277
+ print(f"\n--- RESPOND FUNCTION CALLED ---")
278
+ print(f"Message Text: '{message_text_input}'")
279
+ print(f"Message Files: {message_files_input}")
280
+ # print(f"History (first item type if exists): {type(history_tuples) if history_tuples else 'No history'}")
281
+ print(f"System Prompt: '{system_message_prompt}'")
282
+ print(f"Provider: {provider_choice}, MCP Enabled: {mcp_is_enabled}, MCP Mode: {mcp_interaction_mode_choice}")
283
+ print(f"Active MCP Servers: {active_mcp_server_names}")
284
+
285
+ token_to_use_for_llm = custom_api_key_val if custom_api_key_val.strip() else ACCESS_TOKEN
286
+ if not token_to_use_for_llm and provider_choice != "hf-inference": # Basic check
287
+ yield "Error: API Key required for non-hf-inference providers."
288
+ return
 
 
 
 
 
289
 
290
+ llm_client_instance = InferenceClient(token=token_to_use_for_llm, provider=provider_choice)
 
 
 
291
 
292
+ current_seed = None if seed_val == -1 else seed_val
293
+ model_id_for_llm = custom_model_id.strip() if custom_model_id.strip() else selected_hf_model_id
294
+ print(f"Using LLM model: {model_id_for_llm} via {provider_choice}")
295
+
296
+ # --- MCP Tool Call Logic ---
297
+ if mcp_is_enabled and (message_text_input or message_files_input) and active_mcp_server_names:
298
+ tool_call_output_dict = None
299
+ invoked_tool_display_name = "a tool"
300
+ invoked_server_display_name = "an MCP server"
301
+
302
+ if message_text_input and message_text_input.startswith("/mcp"):
303
+ print("Processing explicit MCP command...")
304
+ command_parts = message_text_input.split(" ", 3)
 
 
 
 
305
  if len(command_parts) < 3:
306
+ yield "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments_json]"
307
+ return
308
 
309
+ _, server_name_cmd, tool_name_cmd = command_parts[:3]
310
+ invoked_server_display_name = server_name_cmd
311
+ invoked_tool_display_name = tool_name_cmd
312
+ args_json_str = "{}" if len(command_parts) < 4 else command_parts[3]
313
 
314
  try:
315
+ args_dict_cmd = json.loads(args_json_str)
316
+ tool_call_output_dict = call_mcp_tool(invoked_server_display_name, invoked_tool_display_name, **args_dict_cmd)
 
 
 
317
  except json.JSONDecodeError:
318
+ yield f"Invalid JSON arguments for MCP command: {args_json_str}"
319
+ return
320
+ except Exception as e_cmd:
321
+ yield f"Error preparing MCP command: {str(e_cmd)}"
322
+ return
323
+
324
+ elif mcp_interaction_mode_choice == "Natural Language":
325
+ print("Analyzing message for natural language tool call...")
326
+ # For natural language, primarily use message_text_input. Files could be context later.
327
+ detected_server_nl, tool_info_nl = analyze_message_for_tool_call(
328
+ message_text_input,
329
+ active_mcp_server_names,
330
+ llm_client_instance,
331
+ model_id_for_llm,
332
+ system_message_prompt
333
  )
334
 
335
+ if detected_server_nl and tool_info_nl and tool_info_nl.get("tool_name"):
336
+ invoked_server_display_name = detected_server_nl
337
+ invoked_tool_display_name = tool_info_nl['tool_name']
338
+ tool_params_nl = tool_info_nl.get("parameters", {})
339
+ tool_call_output_dict = call_mcp_tool(invoked_server_display_name, invoked_tool_display_name, **tool_params_nl)
340
+
341
+ # --- Handle MCP Tool Result (if a tool was called) ---
342
+ if tool_call_output_dict:
343
+ response_message_parts = [f"I attempted to use the **{invoked_tool_display_name}** tool from **{invoked_server_display_name}**."]
344
 
345
+ if tool_call_output_dict.get("type") == "audio":
346
+ audio_data_uri = tool_call_output_dict["data_uri"]
347
+ audio_html_tag = f"<audio controls src='{audio_data_uri}' title='{tool_call_output_dict.get('name', 'Audio Output')}'></audio>"
348
+ response_message_parts.append(f"Here's the audio output:\n{audio_html_tag}")
349
+ elif tool_call_output_dict.get("type") == "text":
350
+ response_message_parts.append(f"\nResult:\n```\n{tool_call_output_dict['value']}\n```")
351
+ elif tool_call_output_dict.get("type") == "json_string": # Changed from "json" to avoid confusion with dict
352
+ response_message_parts.append(f"\nResult (JSON):\n```json\n{tool_call_output_dict['value']}\n```")
353
+ elif tool_call_output_dict.get("type") == "error":
354
+ response_message_parts.append(f"\nUnfortunately, there was an error: {tool_call_output_dict['message']}")
355
+ else: # Fallback for unexpected result structure
356
+ response_message_parts.append(f"\nThe tool returned: {str(tool_call_output_dict)}")
357
+
358
+ yield "\n".join(response_message_parts)
359
+ return # End here if a tool was called and processed
360
+
361
+ # --- Regular LLM Response Logic (if no MCP tool was successfully called and returned primary content) ---
362
+ print("Proceeding with standard LLM response generation.")
363
+
364
+ # Prepare current user message for LLM (multimodal if files exist)
365
+ current_user_llm_content = []
366
+ if message_text_input and message_text_input.strip():
367
+ current_user_llm_content.append({"type": "text", "text": message_text_input})
368
 
369
+ if message_files_input:
370
+ for file_path in message_files_input:
371
+ if file_path: # file_path is already the actual temp path from gr.File or gr.Image
372
+ encoded_img_str = encode_image(file_path)
373
+ if encoded_img_str:
374
+ current_user_llm_content.append({
375
+ "type": "image_url",
376
+ "image_url": {"url": f"data:image/jpeg;base64,{encoded_img_str}"}
377
  })
378
+ else:
379
+ print(f"Warning: Failed to encode image {file_path} for LLM.")
380
+
381
+ if not current_user_llm_content:
382
+ print("No content (text or valid files) in current user message for LLM.")
383
+ yield "" # Or some indicator of no action
384
+ return
385
+
386
+ # Augment system message with MCP tool info if enabled
387
+ augmented_sys_msg = system_message_prompt
388
+ if mcp_is_enabled and active_mcp_server_names:
389
+ mcp_tool_descriptions_for_llm = []
390
+ for server_name_iter in active_mcp_server_names:
391
+ if server_name_iter in mcp_connections:
392
+ # Use the more detailed list_mcp_tools output for the system prompt if desired
393
+ tools_list_str = list_mcp_tools(server_name_iter) # This returns markdown
394
+ mcp_tool_descriptions_for_llm.append(f"From server '{server_name_iter}':\n{tools_list_str}")
395
+
396
+ if mcp_tool_descriptions_for_llm:
397
+ full_tools_info_str = "\n\n".join(mcp_tool_descriptions_for_llm)
398
+ interaction_advice = ""
399
+ if mcp_interaction_mode_choice == "Command Mode":
400
+ interaction_advice = "The user can invoke these tools using '/mcp <server_name> <tool_name> <json_args>'."
401
+ # For Natural Language mode, the LLM doesn't need explicit instruction in system prompt
402
+ # as `analyze_message_for_tool_call` handles that part.
403
+
404
+ augmented_sys_msg += f"\n\nYou also have access to the following external tools via Model Context Protocol (MCP):\n{full_tools_info_str}\n{interaction_advice}"
405
+
406
+ # Prepare messages list for LLM
407
+ messages_for_llm_api = [{"role": "system", "content": augmented_sys_msg}]
408
+
409
+ for hist_user_turn, hist_assist_response in history_tuples:
410
+ hist_user_text, hist_user_files = hist_user_turn # Unpack ((text, [files]))
411
+
412
+ history_user_llm_content = []
413
+ if hist_user_text and hist_user_text.strip():
414
+ history_user_llm_content.append({"type": "text", "text": hist_user_text})
415
+ if hist_user_files:
416
+ for hist_file_path in hist_user_files:
417
+ encoded_hist_img = encode_image(hist_file_path)
418
+ if encoded_hist_img:
419
+ history_user_llm_content.append({
420
+ "type": "image_url",
421
+ "image_url": {"url": f"data:image/jpeg;base64,{encoded_hist_img}"}
422
+ })
423
+ if history_user_llm_content: # Only add if there's actual content
424
+ messages_for_llm_api.append({"role": "user", "content": history_user_llm_content})
425
 
426
+ if hist_assist_response and hist_assist_response.strip():
427
+ messages_for_llm_api.append({"role": "assistant", "content": hist_assist_response})
428
+
429
+ messages_for_llm_api.append({"role": "user", "content": current_user_llm_content})
430
+ # print(f"Final messages for LLM API: {json.dumps(messages_for_llm_api, indent=2)}")
431
+
432
+
433
+ llm_parameters = {
434
+ "max_tokens": max_tokens_val, "temperature": temperature_val, "top_p": top_p_val,
435
+ "frequency_penalty": frequency_penalty_val,
436
  }
437
+ if current_seed is not None:
438
+ llm_parameters["seed"] = current_seed
 
439
 
440
+ print(f"Sending request to LLM: Model={model_id_for_llm}, Params={llm_parameters}")
441
+ streamed_response_text = ""
442
  try:
443
+ llm_stream = llm_client_instance.chat_completion(
444
+ model=model_id_for_llm,
445
+ messages=messages_for_llm_api,
 
446
  stream=True,
447
+ **llm_parameters
448
  )
449
 
450
+ # print("Streaming LLM response: ", end="", flush=True)
451
+ for chunk in llm_stream:
 
 
452
  if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
453
+ delta = chunk.choices[0].delta
454
+ if hasattr(delta, 'content') and delta.content:
455
+ token = delta.content
456
+ # print(token, end="", flush=True)
457
+ streamed_response_text += token
458
+ yield streamed_response_text
459
+ # print("\nLLM Stream finished.")
460
+ except Exception as e_llm:
461
+ error_msg = f"Error during LLM inference: {str(e_llm)}"
462
+ print(error_msg)
463
+ import traceback
464
+ traceback.print_exc()
465
+ streamed_response_text += f"\n{error_msg}" # Append error to existing stream if any
466
+ yield streamed_response_text
467
+
468
+ print(f"--- RESPOND FUNCTION COMPLETED ---")
469
 
 
470
 
471
  # GRADIO UI
472
+ with gr.Blocks(theme="Nymbo/Nymbo_Theme", title="Serverless TextGen Hub + MCP") as demo:
473
+ gr.Markdown("# Serverless TextGen Hub with MCP Client")
474
  chatbot = gr.Chatbot(
475
+ label="Chat",
476
  height=600,
477
  show_copy_button=True,
478
+ placeholder="Select a model, connect MCP servers (optional), and start chatting!",
479
+ bubble_full_width=False,
480
+ avatar_images=(None, "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo-square.png")
481
  )
482
 
483
+ with gr.Row():
484
+ msg_textbox = gr.MultimodalTextbox( # Changed from gr.Textbox to gr.MultimodalTextbox
485
+ placeholder="Type a message or upload images... (Use /mcp for commands)",
486
+ show_label=False,
487
+ container=False,
488
+ scale=12,
489
+ file_types=["image"], # Can add more types like "audio", "video" if supported by models
490
+ file_count="multiple" # Allow multiple image uploads
491
+ )
492
+ # submit_button = gr.Button("Send", variant="primary", scale=1, min_width=100) # Optional explicit send button
493
+
494
+ with gr.Accordion("LLM Settings", open=False):
495
+ system_message_prompt_box = gr.Textbox(
496
+ value="You are a helpful and versatile AI assistant. You can understand text and images. If you have access to MCP tools, you can use them when appropriate or when the user asks.",
497
+ label="System Prompt", lines=3
498
  )
499
 
 
500
  with gr.Row():
501
+ with gr.Column(scale=1):
502
+ max_tokens_slider_ui = gr.Slider(minimum=128, maximum=8192, value=1024, step=128, label="Max New Tokens")
503
+ temperature_slider_ui = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
504
+ top_p_slider_ui = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-P (Nucleus Sampling)")
505
+ with gr.Column(scale=1):
506
+ frequency_penalty_slider_ui = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.1, label="Frequency Penalty")
507
+ seed_slider_ui = gr.Slider(minimum=-1, maximum=65535, value=-1, step=1, label="Seed (-1 for random)")
508
+
509
+ providers_list_ui = [
510
+ "hf-inference", "cerebras", "together", "sambanova", "novita",
511
+ "cohere", "fireworks-ai", "hyperbolic", "nebius",
512
  ]
513
+ provider_radio_ui = gr.Radio(choices=providers_list_ui, value="hf-inference", label="Inference Provider")
514
 
515
+ byok_textbox_ui = gr.Textbox(label="Your Hugging Face API Key (Optional)", placeholder="Enter HF Token if using non-hf-inference providers or private models", type="password")
 
 
 
 
516
 
517
+ custom_model_id_box = gr.Textbox(label="Custom Model ID (Overrides selection below)", placeholder="e.g., meta-llama/Llama-3-8B-Instruct")
518
 
519
+ model_search_box_ui = gr.Textbox(label="Filter Featured Models", placeholder="Search...", lines=1)
520
 
521
+ # More diverse model list, including some known multimodal ones
522
+ featured_models_list_data = [
523
+ "meta-llama/Meta-Llama-3.1-8B-Instruct", # Good default
524
+ "meta-llama/Meta-Llama-3.1-70B-Instruct",
525
  "mistralai/Mistral-Nemo-Instruct-2407",
526
+ "mistralai/Mixtral-8x22B-Instruct-v0.1",
527
+ "Qwen/Qwen2-7B-Instruct",
528
+ "microsoft/Phi-3-medium-128k-instruct",
529
+ # Multimodal
530
+ "Salesforce/blip-image-captioning-large", # Example, might not be chat
531
+ "llava-hf/llava-1.5-7b-hf", # LLaVA example
532
+ "microsoft/kosmos-2-patch14-224", # Kosmos-2
533
+ "google/paligemma-3b-mix-448", # PaliGemma
 
 
 
 
 
534
  ]
535
+ featured_model_radio_ui = gr.Radio(label="Select a Featured Model", choices=featured_models_list_data, value="meta-llama/Meta-Llama-3.1-8B-Instruct", interactive=True)
536
 
537
+ gr.Markdown("Tip: For multimodal chat, ensure selected model supports image inputs (e.g., LLaVA, PaliGemma, Kosmos-2).")
538
+
539
+ with gr.Accordion("MCP Client Settings", open=False):
540
+ mcp_enabled_checkbox_ui = gr.Checkbox(label="Enable MCP Support", value=False, info="Connect to external tools and services via MCP.")
 
 
 
 
 
541
 
542
  with gr.Row():
543
+ mcp_server_url_textbox = gr.Textbox(label="MCP Server URL", placeholder="e.g., https://your-mcp-server.hf.space/gradio_api/mcp/sse")
544
+ mcp_server_name_textbox = gr.Textbox(label="Friendly Server Name (Optional)", placeholder="MyTTS_Server")
545
+ mcp_connect_button_ui = gr.Button("Connect", variant="secondary")
546
 
547
+ mcp_connection_status_textbox = gr.Textbox(label="MCP Connection Status", placeholder="No MCP servers connected.", interactive=False, lines=2)
 
 
 
 
548
 
549
+ active_mcp_servers_dropdown = gr.Dropdown(
550
+ label="Use Tools From (Select Active MCP Servers)", choices=[], multiselect=True,
551
+ info="Choose which connected servers the LLM can use tools from."
 
 
552
  )
553
 
554
+ mcp_interaction_mode_radio = gr.Radio(
555
+ label="MCP Interaction Mode", choices=["Natural Language", "Command Mode"], value="Natural Language",
556
+ info="Natural Language: AI tries to detect tool use. Command Mode: Use '/mcp ...' syntax."
 
 
557
  )
558
+ gr.Markdown("Example MCP Command: `/mcp MyTTS text_to_audio {\"text\": \"Hello world!\"}`")
559
 
560
+ # --- Event Handlers ---
561
 
562
+ # Store history as list of tuples: [ ((user_text, [user_files]), assistant_response), ... ]
563
+ chat_history_state = gr.State([])
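For reference, one populated turn in this state looks like the following; the text and file path are hypothetical:

```python
# Illustrative only: a single turn in chat_history_state after the bot has replied.
example_history = [
    (("Describe this image", ["/tmp/gradio/cat.png"]), "It shows a cat sitting on a windowsill."),
]
```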
564
+
565
+ def user_interaction(user_multimodal_input, current_chat_history):
566
+ user_text = user_multimodal_input["text"] if user_multimodal_input and "text" in user_multimodal_input else ""
567
+ user_files = user_multimodal_input["files"] if user_multimodal_input and "files" in user_multimodal_input else []
568
+
569
+ # Only add to history if there's text or files
570
+ if user_text or user_files:
571
+ current_chat_history.append( ((user_text, user_files), None) ) # Append user turn, assistant response is None initially
572
+ return current_chat_history, gr.update(value={"text": "", "files": []}) # Clear input textbox
573
+
574
+ def bot_response_generator(
575
+ current_chat_history, system_prompt, max_tokens, temp, top_p_val, freq_penalty, seed_val,
576
+ provider_val, api_key_val, custom_model_val, selected_model_val, # Removed search_term as it's not directly used by respond
577
+ mcp_enabled_val, active_servers_val, mcp_mode_val
578
+ ):
579
+ if not current_chat_history or current_chat_history[-1][1] is not None: # If no user message or last message already has bot response
580
+ yield current_chat_history # Or simply `return current_chat_history` if not streaming
581
+ return
582
+
583
+ user_turn_content, _ = current_chat_history[-1] # Get the latest user turn: (text, [files])
584
+ message_text, message_files = user_turn_content
585
+
586
+ # The history passed to `respond` should be all turns *before* the current one
587
+ history_for_respond = current_chat_history[:-1]
588
+
589
+ response_stream = respond(
590
+ message_text, message_files, history_for_respond,
591
+ system_prompt, max_tokens, temp, top_p_val, freq_penalty, seed_val,
592
+ provider_val, api_key_val, custom_model_val, selected_model_val,
593
+ mcp_enabled_val, active_servers_val, mcp_mode_val
594
+ )
 
595
 
596
+ full_bot_message = ""
597
+ for chunk in response_stream:
598
+ full_bot_message = chunk
599
+ current_chat_history[-1] = (user_turn_content, full_bot_message) # Update last item's assistant part
600
+ yield current_chat_history
601
 
602
+ # Link UI components to functions
603
+ msg_textbox.submit(
604
+ user_interaction,
605
+ inputs=[msg_textbox, chat_history_state],
606
+ outputs=[chat_history_state, msg_textbox] # Update history and clear input
607
+ ).then(
608
+ bot_response_generator,
609
+ inputs=[
610
+ chat_history_state, system_message_prompt_box, max_tokens_slider_ui, temperature_slider_ui,
611
+ top_p_slider_ui, frequency_penalty_slider_ui, seed_slider_ui, provider_radio_ui,
612
+ byok_textbox_ui, custom_model_id_box, featured_model_radio_ui,
613
+ mcp_enabled_checkbox_ui, active_mcp_servers_dropdown, mcp_interaction_mode_radio
614
+ ],
615
+ outputs=[chatbot] # Stream to chatbot
616
  )
 
617
 
618
+ # MCP Connection
619
+ def handle_mcp_connect(url, name_suggestion):
620
+ if not url or not url.strip():
621
+ return "MCP Server URL cannot be empty.", gr.update(choices=list(mcp_connections.keys()))
622
+
623
+ _, status_msg = connect_to_mcp_server(url, name_suggestion)
624
+ # Update dropdown choices with current server names
625
+ new_choices = list(mcp_connections.keys())
626
+ # Preserve selected values if they are still valid connections
627
+ # current_selected = active_mcp_servers_dropdown.value # This might not work directly
628
+ # new_selected = [s for s in current_selected if s in new_choices]
629
+ return status_msg, gr.update(choices=new_choices) #, value=new_selected)
630
+
631
+ mcp_connect_button_ui.click(
632
+ handle_mcp_connect,
633
+ inputs=[mcp_server_url_textbox, mcp_server_name_textbox],
634
+ outputs=[mcp_connection_status_textbox, active_mcp_servers_dropdown]
635
  )
 
636
 
637
+ # Model Filtering
638
+ def filter_featured_models(search_query):
639
+ if not search_query:
640
+ return gr.update(choices=featured_models_list_data)
641
+ filtered = [m for m in featured_models_list_data if search_query.lower() in m.lower()]
642
+ return gr.update(choices=filtered if filtered else ["No models match your search"])
643
+
644
+ model_search_box_ui.change(filter_featured_models, inputs=model_search_box_ui, outputs=featured_model_radio_ui)
645
+
646
+ # Auto-select hf-inference if BYOK is empty and other provider is chosen
647
+ def validate_api_key_for_provider(api_key_text, current_provider):
648
+ if not api_key_text.strip() and current_provider != "hf-inference":
649
+ gr.Warning("API Key needed for non-hf-inference providers. Defaulting to hf-inference.")
650
+ return gr.update(value="hf-inference")
651
+ return current_provider # No change if key provided or hf-inference selected
652
+
653
+ byok_textbox_ui.change(validate_api_key_for_provider, inputs=[byok_textbox_ui, provider_radio_ui], outputs=provider_radio_ui)
654
+ provider_radio_ui.change(validate_api_key_for_provider, inputs=[byok_textbox_ui, provider_radio_ui], outputs=provider_radio_ui)
655
 
 
656
 
657
  if __name__ == "__main__":
658
+ print("Launching Gradio demo...")
659
+ demo.queue().launch(debug=True, show_api=False) # Not launched as an MCP server; this app acts only as an MCP client