Nymbo committed
Commit 75d7afe · verified · 1 Parent(s): 11de92c

Update app.py

Files changed (1)
  1. app.py +359 -688
app.py CHANGED
@@ -5,180 +5,119 @@ import json
5
  import base64
6
  from PIL import Image
7
  import io
8
- import requests
9
- from typing import Dict, List, Optional, Any, Union
10
- import time
11
- import logging
12
 
13
- # Setup logging
14
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
15
- logger = logging.getLogger(__name__)
16
 
17
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
18
- logger.info("Access token loaded.")
19
-
20
- # MCP Client Configuration
21
- MCP_SERVERS = {}
22
- try:
23
- mcp_config = os.getenv("MCP_CONFIG")
24
- if mcp_config:
25
- MCP_SERVERS = json.loads(mcp_config)
26
- logger.info(f"Loaded MCP configuration: {len(MCP_SERVERS)} servers defined")
27
- except Exception as e:
28
- logger.error(f"Error loading MCP configuration: {e}")
29
-
30
- # Function to encode image to base64
31
  def encode_image(image_path):
32
  if not image_path:
33
- logger.warning("No image path provided")
34
  return None
35
 
36
  try:
37
- logger.info(f"Encoding image from path: {image_path}")
38
-
39
- # If it's already a PIL Image
40
  if isinstance(image_path, Image.Image):
41
  image = image_path
42
  else:
43
- # Try to open the image file
44
  image = Image.open(image_path)
45
 
46
- # Convert to RGB if image has an alpha channel (RGBA)
47
  if image.mode == 'RGBA':
48
  image = image.convert('RGB')
49
 
50
- # Encode to base64
51
  buffered = io.BytesIO()
52
  image.save(buffered, format="JPEG")
53
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
54
- logger.info("Image encoded successfully")
55
  return img_str
56
  except Exception as e:
57
- logger.error(f"Error encoding image: {e}")
58
- return None
59
-
60
- # MCP Client implementation
61
- class MCPClient:
62
- def __init__(self, server_url: str):
63
- self.server_url = server_url
64
- self.session_id = None
65
- logger.info(f"Initialized MCP Client for server: {server_url}")
66
-
67
- def connect(self) -> bool:
68
- """Establish connection with the MCP server"""
69
- try:
70
- response = requests.post(
71
- f"{self.server_url}/connect",
72
- json={"client": "Serverless-TextGen-Hub", "version": "1.0.0"}
73
- )
74
- if response.status_code == 200:
75
- result = response.json()
76
- self.session_id = result.get("session_id")
77
- logger.info(f"Connected to MCP server with session ID: {self.session_id}")
78
- return True
79
- else:
80
- logger.error(f"Failed to connect to MCP server: {response.status_code} - {response.text}")
81
- return False
82
- except Exception as e:
83
- logger.error(f"Error connecting to MCP server: {e}")
84
- return False
85
-
86
- def list_tools(self) -> List[Dict]:
87
- """List available tools from the MCP server"""
88
- if not self.session_id:
89
- if not self.connect():
90
- return []
91
-
92
- try:
93
- response = requests.get(
94
- f"{self.server_url}/tools/list",
95
- headers={"X-MCP-Session": self.session_id}
96
- )
97
- if response.status_code == 200:
98
- result = response.json()
99
- tools = result.get("tools", [])
100
- logger.info(f"Retrieved {len(tools)} tools from MCP server")
101
- return tools
102
- else:
103
- logger.error(f"Failed to list tools: {response.status_code} - {response.text}")
104
- return []
105
- except Exception as e:
106
- logger.error(f"Error listing tools: {e}")
107
- return []
108
-
109
- def call_tool(self, tool_name: str, args: Dict) -> Dict:
110
- """Call a tool on the MCP server"""
111
- if not self.session_id:
112
- if not self.connect():
113
- return {"error": "Not connected to MCP server"}
114
-
115
- try:
116
- response = requests.post(
117
- f"{self.server_url}/tools/call",
118
- headers={"X-MCP-Session": self.session_id},
119
- json={"name": tool_name, "arguments": args}
120
- )
121
- if response.status_code == 200:
122
- result = response.json()
123
- logger.info(f"Successfully called tool {tool_name}")
124
- return result
125
- else:
126
- error_msg = f"Failed to call tool {tool_name}: {response.status_code} - {response.text}"
127
- logger.error(error_msg)
128
- return {"error": error_msg}
129
- except Exception as e:
130
- error_msg = f"Error calling tool {tool_name}: {e}"
131
- logger.error(error_msg)
132
- return {"error": error_msg}
133
-
134
- # Text-to-speech client function
135
- def text_to_speech(text: str, server_name: str = None) -> Optional[str]:
136
- """
137
- Convert text to speech using an MCP TTS server
138
- Returns an audio URL that can be embedded in the chat
139
- """
140
- if not server_name or server_name not in MCP_SERVERS:
141
- logger.warning(f"TTS server {server_name} not configured")
142
- return None
143
-
144
- server_url = MCP_SERVERS[server_name].get("url")
145
- if not server_url:
146
- logger.warning(f"No URL found for TTS server {server_name}")
147
- return None
148
-
149
- client = MCPClient(server_url)
150
-
151
- # List available tools to find the TTS tool
152
- tools = client.list_tools()
153
- tts_tool = next((t for t in tools if "text_to_audio" in t["name"] or "tts" in t["name"]), None)
154
-
155
- if not tts_tool:
156
- logger.warning(f"No TTS tool found on server {server_name}")
157
- return None
158
-
159
- # Call the TTS tool
160
- result = client.call_tool(tts_tool["name"], {"text": text, "speed": 1.0})
161
-
162
- if "error" in result:
163
- logger.error(f"TTS error: {result['error']}")
164
- return None
165
-
166
- # Process the result - usually a base64 encoded WAV
167
- audio_data = result.get("audio") or result.get("content") or result.get("result")
168
-
169
- if isinstance(audio_data, str) and audio_data.startswith("data:audio"):
170
- # Already a data URL
171
- return audio_data
172
- elif isinstance(audio_data, str):
173
- # Assume it's base64 encoded
174
- return f"data:audio/wav;base64,{audio_data}"
175
- else:
176
- logger.error(f"Unexpected TTS result format: {type(audio_data)}")
177
  return None
178
 
 
179
  def respond(
180
- message,
181
- image_files, # Changed parameter name and structure
182
  history: list[tuple[str, str]],
183
  system_message,
184
  max_tokens,
@@ -189,582 +128,314 @@ def respond(
189
  provider,
190
  custom_api_key,
191
  custom_model,
192
- model_search_term,
193
- selected_model,
194
- tts_enabled=False,
195
- tts_server=None
196
  ):
197
- logger.info(f"Received message: {message}")
198
- logger.info(f"Received {len(image_files) if image_files else 0} images")
199
- logger.info(f"History: {history}")
200
- logger.info(f"System message: {system_message}")
201
- logger.info(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
202
- logger.info(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
203
- logger.info(f"Selected provider: {provider}")
204
- logger.info(f"Custom API Key provided: {bool(custom_api_key.strip())}")
205
- logger.info(f"Selected model (custom_model): {custom_model}")
206
- logger.info(f"Model search term: {model_search_term}")
207
- logger.info(f"Selected model from radio: {selected_model}")
208
- logger.info(f"TTS enabled: {tts_enabled}, TTS server: {tts_server}")
209
-
210
- # Determine which token to use
211
  token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
212
 
213
- if custom_api_key.strip() != "":
214
- logger.info("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
215
- else:
216
- logger.info("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
217
-
218
- # Initialize the Inference Client with the provider and appropriate token
219
- client = InferenceClient(token=token_to_use, provider=provider)
220
- logger.info(f"Hugging Face Inference Client initialized with {provider} provider.")
221
-
222
- # Convert seed to None if -1 (meaning random)
223
- if seed == -1:
224
- seed = None
225
-
226
- # Create multimodal content if images are present
227
- if image_files and len(image_files) > 0:
228
- # Process the user message to include images
229
- user_content = []
230
-
231
- # Add text part if there is any
232
- if message and message.strip():
233
- user_content.append({
234
- "type": "text",
235
- "text": message
236
- })
237
-
238
- # Add image parts
239
- for img in image_files:
240
- if img is not None:
241
- # Get raw image data from path
242
- try:
243
- encoded_image = encode_image(img)
244
- if encoded_image:
245
- user_content.append({
246
- "type": "image_url",
247
- "image_url": {
248
- "url": f"data:image/jpeg;base64,{encoded_image}"
249
- }
250
- })
251
- except Exception as e:
252
- logger.error(f"Error encoding image: {e}")
253
- else:
254
- # Text-only message
255
- user_content = message
256
-
257
- # Prepare messages in the format expected by the API
258
- messages = [{"role": "system", "content": system_message}]
259
- logger.info("Initial messages array constructed.")
260
-
261
- # Add conversation history to the context
262
- for val in history:
263
- user_part = val[0]
264
- assistant_part = val[1]
265
- if user_part:
266
- # Handle both text-only and multimodal messages in history
267
- if isinstance(user_part, tuple) and len(user_part) == 2:
268
- # This is a multimodal message with text and images
269
- history_content = []
270
- if user_part[0]: # Text
271
- history_content.append({
272
- "type": "text",
273
- "text": user_part[0]
274
  })
275
-
276
- for img in user_part[1]: # Images
277
- if img:
278
- try:
279
- encoded_img = encode_image(img)
280
- if encoded_img:
281
- history_content.append({
282
- "type": "image_url",
283
- "image_url": {
284
- "url": f"data:image/jpeg;base64,{encoded_img}"
285
- }
286
- })
287
- except Exception as e:
288
- logger.error(f"Error encoding history image: {e}")
289
-
290
- messages.append({"role": "user", "content": history_content})
291
- else:
292
- # Regular text message
293
- messages.append({"role": "user", "content": user_part})
294
- logger.info(f"Added user message to context (type: {type(user_part)})")
295
-
296
- if assistant_part:
297
- messages.append({"role": "assistant", "content": assistant_part})
298
- logger.info(f"Added assistant message to context: {assistant_part}")
299
-
300
- # Append the latest user message
301
- messages.append({"role": "user", "content": user_content})
302
- logger.info(f"Latest user message appended (content type: {type(user_content)})")
303
-
304
- # Determine which model to use, prioritizing custom_model if provided
305
- model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
306
- logger.info(f"Model selected for inference: {model_to_use}")
307
-
308
- # Start with an empty string to build the response as tokens stream in
309
- response = ""
310
- logger.info(f"Sending request to {provider} provider.")
311
-
312
- # Prepare parameters for the chat completion request
313
- parameters = {
314
- "max_tokens": max_tokens,
315
- "temperature": temperature,
316
- "top_p": top_p,
317
- "frequency_penalty": frequency_penalty,
318
- }
319
 
320
- if seed is not None:
321
- parameters["seed"] = seed
 
 
322
 
323
- # Use the InferenceClient for making the request
324
- try:
325
- # Create a generator for the streaming response
326
- stream = client.chat_completion(
327
- model=model_to_use,
328
- messages=messages,
329
- stream=True,
330
- **parameters
331
- )
332
-
333
- logger.info("Received tokens: ")
334
-
335
- # Process the streaming response
336
- for chunk in stream:
337
- if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
338
- # Extract the content from the response
339
- if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
340
- token_text = chunk.choices[0].delta.content
341
- if token_text:
342
- print(token_text, end="", flush=True)
343
- response += token_text
344
- yield response
345
-
346
- # If TTS is enabled and we have a response, convert it to speech
347
- if tts_enabled and tts_server and response:
348
- logger.info(f"Converting response to speech using TTS server: {tts_server}")
349
- try:
350
- audio_url = text_to_speech(response, tts_server)
351
- if audio_url:
352
- # Add audio tag to the end of the response
353
- response += f"\n\n<audio src='{audio_url}' controls></audio>"
354
- yield response
355
- else:
356
- logger.warning("TTS conversion failed, continuing without audio")
357
- except Exception as e:
358
- logger.error(f"Error in TTS conversion: {e}")
359
- # Continue without TTS if there's an error
360
-
361
- print()
362
- except Exception as e:
363
- logger.error(f"Error during inference: {e}")
364
- response += f"\nError: {str(e)}"
365
366
 
367
- logger.info("Completed response generation.")
368
 
369
- # Function to validate provider selection based on BYOK
370
  def validate_provider(api_key, provider):
371
  if not api_key.strip() and provider != "hf-inference":
372
  return gr.update(value="hf-inference")
373
  return gr.update(value=provider)
374
 
375
- # Function to list available MCP servers
376
- def list_mcp_servers():
377
- """List all configured MCP servers"""
378
- return list(MCP_SERVERS.keys())
379
-
380
  # GRADIO UI
381
  with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
382
- # Create the chatbot component
383
  chatbot = gr.Chatbot(
384
- height=600,
385
- show_copy_button=True,
386
- placeholder="Select a model and begin chatting. Now supports multiple inference providers, multimodal inputs, and MCP servers",
387
- layout="panel"
 
388
  )
389
- logger.info("Chatbot interface created.")
390
 
391
- # Multimodal textbox for messages (combines text and file uploads)
392
- msg = gr.MultimodalTextbox(
393
  placeholder="Type a message or upload images...",
394
- show_label=False,
395
- container=False,
396
- scale=12,
397
- file_types=["image"],
398
- file_count="multiple",
399
- sources=["upload"]
400
  )
401
 
402
- # Create accordion for settings
403
  with gr.Accordion("Settings", open=False):
404
- # System message
405
- system_message_box = gr.Textbox(
406
- value="You are a helpful AI assistant that can understand images and text.",
407
- placeholder="You are a helpful assistant.",
408
- label="System Prompt"
409
- )
410
-
411
- # Generation parameters
412
  with gr.Row():
413
- with gr.Column():
414
- max_tokens_slider = gr.Slider(
415
- minimum=1,
416
- maximum=4096,
417
- value=512,
418
- step=1,
419
- label="Max tokens"
420
- )
421
-
422
- temperature_slider = gr.Slider(
423
- minimum=0.1,
424
- maximum=4.0,
425
- value=0.7,
426
- step=0.1,
427
- label="Temperature"
428
- )
429
-
430
- top_p_slider = gr.Slider(
431
- minimum=0.1,
432
- maximum=1.0,
433
- value=0.95,
434
- step=0.05,
435
- label="Top-P"
436
- )
437
-
438
- with gr.Column():
439
- frequency_penalty_slider = gr.Slider(
440
- minimum=-2.0,
441
- maximum=2.0,
442
- value=0.0,
443
- step=0.1,
444
- label="Frequency Penalty"
445
- )
446
-
447
- seed_slider = gr.Slider(
448
- minimum=-1,
449
- maximum=65535,
450
- value=-1,
451
- step=1,
452
- label="Seed (-1 for random)"
453
- )
454
-
455
- # Provider selection
456
- providers_list = [
457
- "hf-inference", # Default Hugging Face Inference
458
- "cerebras", # Cerebras provider
459
- "together", # Together AI
460
- "sambanova", # SambaNova
461
- "novita", # Novita AI
462
- "cohere", # Cohere
463
- "fireworks-ai", # Fireworks AI
464
- "hyperbolic", # Hyperbolic
465
- "nebius", # Nebius
466
- ]
467
-
468
- provider_radio = gr.Radio(
469
- choices=providers_list,
470
- value="hf-inference",
471
- label="Inference Provider",
472
- )
473
-
474
- # New BYOK textbox
475
- byok_textbox = gr.Textbox(
476
- value="",
477
- label="BYOK (Bring Your Own Key)",
478
- info="Enter a custom Hugging Face API key here. When empty, only 'hf-inference' provider can be used.",
479
- placeholder="Enter your Hugging Face API token",
480
- type="password" # Hide the API key for security
481
- )
482
-
483
- # Custom model box
484
- custom_model_box = gr.Textbox(
485
- value="",
486
- label="Custom Model",
487
- info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
488
- placeholder="meta-llama/Llama-3.3-70B-Instruct"
489
- )
490
-
491
- # Model search
492
- model_search_box = gr.Textbox(
493
- label="Filter Models",
494
- placeholder="Search for a featured model...",
495
- lines=1
496
- )
497
-
498
- # Featured models list
499
- # Updated to include multimodal models
500
- models_list = [
501
- "meta-llama/Llama-3.2-11B-Vision-Instruct",
502
- "meta-llama/Llama-3.3-70B-Instruct",
503
- "meta-llama/Llama-3.1-70B-Instruct",
504
- "meta-llama/Llama-3.0-70B-Instruct",
505
- "meta-llama/Llama-3.2-3B-Instruct",
506
- "meta-llama/Llama-3.2-1B-Instruct",
507
- "meta-llama/Llama-3.1-8B-Instruct",
508
- "NousResearch/Hermes-3-Llama-3.1-8B",
509
- "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
510
- "mistralai/Mistral-Nemo-Instruct-2407",
511
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
512
- "mistralai/Mistral-7B-Instruct-v0.3",
513
- "mistralai/Mistral-7B-Instruct-v0.2",
514
- "Qwen/Qwen3-235B-A22B",
515
- "Qwen/Qwen3-32B",
516
- "Qwen/Qwen2.5-72B-Instruct",
517
- "Qwen/Qwen2.5-3B-Instruct",
518
- "Qwen/Qwen2.5-0.5B-Instruct",
519
- "Qwen/QwQ-32B",
520
- "Qwen/Qwen2.5-Coder-32B-Instruct",
521
- "microsoft/Phi-3.5-mini-instruct",
522
- "microsoft/Phi-3-mini-128k-instruct",
523
  "microsoft/Phi-3-mini-4k-instruct",
524
  ]
 
 
525
 
526
- featured_model_radio = gr.Radio(
527
- label="Select a model below",
528
- choices=models_list,
529
- value="meta-llama/Llama-3.2-11B-Vision-Instruct", # Default to a multimodal model
530
- interactive=True
531
  )
 
 
532
 
533
- gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")
534
-
535
- # MCP TTS integration
536
- with gr.Accordion("MCP Integration", open=False):
537
- gr.Markdown("## Model Context Protocol (MCP) Integration")
538
- gr.Markdown("Connect to MCP servers to extend functionality.")
539
-
540
- tts_enabled = gr.Checkbox(
541
- label="Enable Text-to-Speech",
542
- value=False,
543
- info="When enabled, responses will be converted to speech using the selected MCP TTS server"
544
- )
545
-
546
- # Create dropdown for available MCP servers
547
- available_servers = list_mcp_servers()
548
- tts_server = gr.Dropdown(
549
- label="TTS Server",
550
- choices=available_servers,
551
- value=available_servers[0] if available_servers else None,
552
- interactive=True,
553
- visible=len(available_servers) > 0
554
- )
555
-
556
- # If no servers configured, show a message
557
- if not available_servers:
558
- gr.Markdown("""
559
- No MCP servers configured. Add them using the MCP_CONFIG environment variable:
560
- ```json
561
- {
562
- "kokoroTTS": {
563
- "url": "https://your-kokoro-tts-server/gradio_api/mcp/sse"
564
- }
565
- }
566
- ```
567
- """)
568
 
569
- # Chat history state
570
- chat_history = gr.State([])
571
-
572
- # Function to filter models
573
  def filter_models(search_term):
574
- logger.info(f"Filtering models with search term: {search_term}")
575
- filtered = [m for m in models_list if search_term.lower() in m.lower()]
576
- logger.info(f"Filtered models: {filtered}")
577
- return gr.update(choices=filtered)
578
 
579
- # Function to set custom model from radio
580
  def set_custom_model_from_radio(selected):
581
- logger.info(f"Featured model selected: {selected}")
582
- return selected
583
 
584
- # Function for the chat interface
585
- def user(user_message, history):
586
- # Debug logging for troubleshooting
587
- logger.info(f"User message received: {user_message}")
588
-
589
- # Skip if message is empty (no text and no files)
590
- if not user_message or (not user_message.get("text") and not user_message.get("files")):
591
- logger.info("Empty message, skipping")
592
- return history
593
-
594
- # Prepare multimodal message format
595
- text_content = user_message.get("text", "").strip()
596
- files = user_message.get("files", [])
597
-
598
- logger.info(f"Text content: {text_content}")
599
- logger.info(f"Files: {files}")
600
-
601
- # If both text and files are empty, skip
602
- if not text_content and not files:
603
- logger.info("No content to display")
604
- return history
605
-
606
- # Add message with images to history
607
- if files and len(files) > 0:
608
- # Add text message first if it exists
609
- if text_content:
610
- # Add a separate text message
611
- logger.info(f"Adding text message: {text_content}")
612
- history.append([text_content, None])
613
-
614
- # Then add each image file separately
615
- for file_path in files:
616
- if file_path and isinstance(file_path, str):
617
- logger.info(f"Adding image: {file_path}")
618
- # Add image as a separate message with no text
619
- history.append([f"![Image]({file_path})", None])
620
-
621
- return history
622
- else:
623
- # For text-only messages
624
- logger.info(f"Adding text-only message: {text_content}")
625
- history.append([text_content, None])
626
- return history
627
 
628
- # Define bot response function
629
- def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model, tts_enabled, tts_server):
630
- # Check if history is valid
631
- if not history or len(history) == 0:
632
- logger.info("No history to process")
633
- return history
634
-
635
- # Get the most recent message and detect if it's an image
636
- user_message = history[-1][0]
637
- logger.info(f"Processing user message: {user_message}")
638
-
639
- is_image = False
640
- image_path = None
641
- text_content = user_message
642
-
643
- # Check if this is an image message (marked with ![Image])
644
- if isinstance(user_message, str) and user_message.startswith("![Image]("):
645
- is_image = True
646
- # Extract image path from markdown format ![Image](path)
647
- image_path = user_message.replace("![Image](", "").replace(")", "")
648
- logger.info(f"Image detected: {image_path}")
649
- text_content = "" # No text for image-only messages
650
-
651
- # Look back for text context if this is an image
652
- text_context = ""
653
- if is_image and len(history) > 1:
654
- # Use the previous message as context if it's text
655
- prev_message = history[-2][0]
656
- if isinstance(prev_message, str) and not prev_message.startswith("![Image]("):
657
- text_context = prev_message
658
- logger.info(f"Using text context from previous message: {text_context}")
659
-
660
- # Process message through respond function
661
- history[-1][1] = ""
662
-
663
- # Use either the image or text for the API
664
- if is_image:
665
- # For image messages
666
- for response in respond(
667
- text_context, # Text context from previous message if any
668
- [image_path], # Current image
669
- history[:-1], # Previous history
670
- system_msg,
671
- max_tokens,
672
- temperature,
673
- top_p,
674
- freq_penalty,
675
- seed,
676
- provider,
677
- api_key,
678
- custom_model,
679
- search_term,
680
- selected_model,
681
- tts_enabled,
682
- tts_server
683
- ):
684
- history[-1][1] = response
685
- yield history
686
- else:
687
- # For text-only messages
688
- for response in respond(
689
- text_content, # Text message
690
- None, # No image
691
- history[:-1], # Previous history
692
- system_msg,
693
- max_tokens,
694
- temperature,
695
- top_p,
696
- freq_penalty,
697
- seed,
698
- provider,
699
- api_key,
700
- custom_model,
701
- search_term,
702
- selected_model,
703
- tts_enabled,
704
- tts_server
705
- ):
706
- history[-1][1] = response
707
- yield history
708
-
709
- # Event handlers - only using the MultimodalTextbox's built-in submit functionality
710
- msg.submit(
711
- user,
712
- [msg, chatbot],
713
- [chatbot],
714
- queue=False
715
- ).then(
716
- bot,
717
- [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
718
- frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
719
- model_search_box, featured_model_radio, tts_enabled, tts_server],
720
- [chatbot]
721
- ).then(
722
- lambda: {"text": "", "files": []}, # Clear inputs after submission
723
- None,
724
- [msg]
725
- )
726
-
727
- # Connect the model filter to update the radio choices
728
- model_search_box.change(
729
- fn=filter_models,
730
- inputs=model_search_box,
731
- outputs=featured_model_radio
732
- )
733
- logger.info("Model search box change event linked.")
734
 
735
- # Connect the featured model radio to update the custom model box
736
- featured_model_radio.change(
737
- fn=set_custom_model_from_radio,
738
- inputs=featured_model_radio,
739
- outputs=custom_model_box
740
- )
741
- logger.info("Featured model radio button change event linked.")
742
-
743
- # Connect the BYOK textbox to validate provider selection
744
- byok_textbox.change(
745
- fn=validate_provider,
746
- inputs=[byok_textbox, provider_radio],
747
- outputs=provider_radio
748
- )
749
750
 
751
- # Also validate provider when the radio changes to ensure consistency
752
- provider_radio.change(
753
- fn=validate_provider,
754
- inputs=[byok_textbox, provider_radio],
755
- outputs=provider_radio
756
  )
757
- logger.info("Provider radio button change event linked.")
758
 
759
- # Update TTS server dropdown visibility based on the TTS toggle
760
- tts_enabled.change(
761
- lambda enabled: gr.update(visible=enabled and len(list_mcp_servers()) > 0),
762
- inputs=tts_enabled,
763
- outputs=tts_server
764
- )
765
 
766
- logger.info("Gradio interface initialized.")
 
 
767
 
 
768
  if __name__ == "__main__":
769
- logger.info("Launching the demo application.")
770
- demo.launch(show_api=True)
 
5
  import base64
6
  from PIL import Image
7
  import io
8
+ import atexit
 
 
 
9
 
10
+ # Ensure smolagents and mcp are installed: pip install "smolagents[mcp]" mcp
11
+ from smolagents import ToolCollection, CodeAgent
12
+ from smolagents.mcp_client import MCPClient as SmolMCPClient # For connecting to MCP SSE servers
13
 
14
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
15
+ print("Access token loaded.")
16
+
17
+ # --- MCP Client Integration ---
18
+ mcp_tools_collection = ToolCollection(tools=[]) # Global store for loaded MCP tools
19
+ mcp_client_instances = [] # To keep track of client instances for proper closing
20
+
21
+ DEFAULT_MCP_SERVERS = [
22
+ {"name": "KokoroTTS (Example)", "type": "sse", "url": "https://fdaudens-kokoro-mcp.hf.space/gradio_api/mcp/sse"}
23
+ ]
24
+
25
+ def load_mcp_tools(server_configs_list):
26
+ global mcp_tools_collection, mcp_client_instances
27
+
28
+ # Close any existing client instances before loading new ones
29
+ for client_instance in mcp_client_instances:
30
+ try:
31
+ client_instance.close()
32
+ print(f"Closed existing MCP client: {client_instance}")
33
+ except Exception as e:
34
+ print(f"Error closing existing MCP client {client_instance}: {e}")
35
+ mcp_client_instances = []
36
+
37
+ all_discovered_tools = []
38
+ if not server_configs_list:
39
+ print("No MCP server configurations provided. Clearing MCP tools.")
40
+ mcp_tools_collection = ToolCollection(tools=all_discovered_tools)
41
+ return
42
+
43
+ print(f"Loading MCP tools from {len(server_configs_list)} server configurations...")
44
+ for config in server_configs_list:
45
+ server_name = config.get('name', config.get('url', 'Unknown Server'))
46
+ try:
47
+ if config.get("type") == "sse":
48
+ sse_url = config["url"]
49
+ print(f"Attempting to connect to MCP SSE server: {server_name} at {sse_url}")
50
+
51
+ # Using SmolMCPClient for SSE servers as shown in documentation
52
+ # The constructor expects server_parameters={"url": sse_url}
53
+ smol_mcp_client = SmolMCPClient(server_parameters={"url": sse_url})
54
+ mcp_client_instances.append(smol_mcp_client) # Keep track to close later
55
+
56
+ discovered_tools_from_server = smol_mcp_client.get_tools() # Returns a list of Tool objects
57
+
58
+ if discovered_tools_from_server:
59
+ all_discovered_tools.extend(list(discovered_tools_from_server))
60
+ print(f"Discovered {len(discovered_tools_from_server)} tools from {server_name}.")
61
+ else:
62
+ print(f"No tools discovered from {server_name}.")
63
+ # Add elif for "stdio" type if needed in the future, though it's more complex for Gradio apps
64
+ else:
65
+ print(f"Unsupported MCP server type '{config.get('type')}' for {server_name}. Skipping.")
66
+ except Exception as e:
67
+ print(f"Error loading MCP tools from {server_name}: {e}")
68
+
69
+ mcp_tools_collection = ToolCollection(tools=all_discovered_tools)
70
+ if mcp_tools_collection and len(mcp_tools_collection.tools) > 0:
71
+ print(f"Successfully loaded a total of {len(mcp_tools_collection.tools)} MCP tools:")
72
+ for tool in mcp_tools_collection.tools:
73
+ print(f" - {tool.name}: {tool.description[:100]}...") # Print short description
74
+ else:
75
+ print("No MCP tools were loaded, or an error occurred.")
76
+
77
+ def cleanup_mcp_client_instances_on_exit():
78
+ global mcp_client_instances
79
+ print("Attempting to clean up MCP client instances on application exit...")
80
+ for client_instance in mcp_client_instances:
81
+ try:
82
+ client_instance.close()
83
+ print(f"Closed MCP client: {client_instance}")
84
+ except Exception as e:
85
+ print(f"Error closing MCP client {client_instance} on exit: {e}")
86
+ mcp_client_instances = []
87
+ print("MCP client cleanup finished.")
88
+
89
+ atexit.register(cleanup_mcp_client_instances_on_exit)
90
+ # --- End MCP Client Integration ---
91
+
92
+ # Function to encode image to base64 (remains the same)
93
  def encode_image(image_path):
94
  if not image_path:
95
+ print("No image path provided")
96
  return None
97
 
98
  try:
99
+ print(f"Encoding image from path: {image_path}")
 
 
100
  if isinstance(image_path, Image.Image):
101
  image = image_path
102
  else:
 
103
  image = Image.open(image_path)
104
 
 
105
  if image.mode == 'RGBA':
106
  image = image.convert('RGB')
107
 
 
108
  buffered = io.BytesIO()
109
  image.save(buffered, format="JPEG")
110
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
111
+ print("Image encoded successfully")
112
  return img_str
113
  except Exception as e:
114
+ print(f"Error encoding image: {e}")
115
  return None
116
 
117
+ # Modified respond function
118
  def respond(
119
+ message_input_text, # From multimodal textbox's text part
120
+ image_files_list, # From multimodal textbox's files part
121
  history: list[tuple[str, str]],
122
  system_message,
123
  max_tokens,
 
128
  provider,
129
  custom_api_key,
130
  custom_model,
131
+ model_search_term, # Not directly used in this function but passed by UI
132
+ selected_model # From radio
 
 
133
  ):
134
+ global mcp_tools_collection # Access the loaded MCP tools
135
+
136
+ print(f"Received message text: {message_input_text}")
137
+ print(f"Received {len(image_files_list) if image_files_list else 0} images")
138
+ # ... (keep other prints for debugging)
139
+
 
140
  token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
141
+ hf_inference_client = InferenceClient(token=token_to_use, provider=provider)
142
+ print(f"Hugging Face Inference Client initialized with {provider} provider.")
143
+
144
+ if seed == -1: seed = None
145
+
146
+ # --- Prepare current user message (potentially multimodal) ---
147
+ current_user_content_parts = []
148
+ if message_input_text and message_input_text.strip():
149
+ current_user_content_parts.append({"type": "text", "text": message_input_text.strip()})
150
 
151
+ if image_files_list:
152
+ for img_path in image_files_list:
153
+ if img_path: # img_path is the path to the uploaded file
154
+ encoded_img = encode_image(img_path)
155
+ if encoded_img:
156
+ current_user_content_parts.append({
157
+ "type": "image_url",
158
+ "image_url": {"url": f"data:image/jpeg;base64,{encoded_img}"}
159
  })
160
 
161
+ if not current_user_content_parts: # If message is truly empty
162
+ print("Skipping empty message.")
163
+ for item in history: yield item # hack to make gradio update with history
164
+ return
165
 
166
+ # --- Construct messages for LLM ---
167
+ llm_messages = [{"role": "system", "content": system_message}]
168
+ for hist_user, hist_assistant in history:
169
+ # Assuming history user part is already formatted (string or list of dicts)
170
+ if hist_user:
171
+ # Handle complex history items (tuples of text, list_of_image_paths)
172
+ if isinstance(hist_user, tuple) and len(hist_user) == 2:
173
+ hist_user_text, hist_user_images = hist_user
174
+ hist_user_parts = []
175
+ if hist_user_text: hist_user_parts.append({"type": "text", "text": hist_user_text})
176
+ for img_p in hist_user_images:
177
+ enc_img = encode_image(img_p)
178
+ if enc_img: hist_user_parts.append({"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{enc_img}"}})
179
+ if hist_user_parts: llm_messages.append({"role": "user", "content": hist_user_parts})
180
+ elif isinstance(hist_user, str): # Simple text history
181
+ llm_messages.append({"role": "user", "content": hist_user})
182
+ # else: could be already formatted list of dicts from previous multimodal turn
183
+
184
+ if hist_assistant:
185
+ llm_messages.append({"role": "assistant", "content": hist_assistant})
186
+
187
+ llm_messages.append({"role": "user", "content": current_user_content_parts if len(current_user_content_parts) > 1 else current_user_content_parts[0] if current_user_content_parts else ""})
188
+
189
+ model_to_use = custom_model.strip() if custom_model.strip() else selected_model
190
+ print(f"Model selected for inference: {model_to_use}")
191
+
192
+ # --- Agent Logic or Direct LLM Call ---
193
+ active_mcp_tools = list(mcp_tools_collection.tools) if mcp_tools_collection else []
194
+
195
+ if active_mcp_tools:
196
+ print(f"MCP tools are active ({len(active_mcp_tools)} tools). Using CodeAgent.")
197
+
198
+ # Wrapper for smolagents.CodeAgent to use our configured HF InferenceClient
199
+ class HFClientWrapperForAgent:
200
+ def __init__(self, hf_client, model_id, outer_scope_params):
201
+ self.client = hf_client
202
+ self.model_id = model_id
203
+ self.params = outer_scope_params
204
+
205
+ def generate(self, agent_llm_messages, tools=None, tool_choice=None, **kwargs):
206
+ # agent_llm_messages is from the agent. tools/tool_choice also from agent.
207
+ api_params = {
208
+ "model": self.model_id,
209
+ "messages": agent_llm_messages,
210
+ "stream": False, # CodeAgent's .run() expects a full response object
211
+ "max_tokens": self.params['max_tokens'],
212
+ "temperature": self.params['temperature'],
213
+ "top_p": self.params['top_p'],
214
+ "frequency_penalty": self.params['frequency_penalty'],
215
+ }
216
+ if self.params['seed'] is not None: api_params["seed"] = self.params['seed']
217
+ if tools: api_params["tools"] = tools
218
+ if tool_choice: api_params["tool_choice"] = tool_choice
219
+
220
+ print(f"Agent's HFClientWrapper calling LLM: {self.model_id}")
221
+ completion = self.client.chat_completion(**api_params)
222
+ return completion
223
 
224
+ outer_scope_llm_params = {
225
+ "max_tokens": max_tokens, "temperature": temperature, "top_p": top_p,
226
+ "frequency_penalty": frequency_penalty, "seed": seed
227
+ }
228
+ agent_model_adapter = HFClientWrapperForAgent(hf_inference_client, model_to_use, outer_scope_llm_params)
229
+
230
+ agent = CodeAgent(tools=active_mcp_tools, model=agent_model_adapter)
231
+
232
+ # Prime agent with history (all messages except the current user query)
233
+ agent.messages = llm_messages[:-1]
234
+
235
+ # CodeAgent.run expects a string query. Extract text from current user message.
236
+ current_query_for_agent = message_input_text.strip() if message_input_text else "User provided image(s)."
237
+ if not current_query_for_agent and image_files_list: # If only image, provide a generic text
238
+ current_query_for_agent = "Describe the image(s) or follow instructions related to them."
239
+ elif not current_query_for_agent and not image_files_list: # Should not happen due to earlier check
240
+ current_query_for_agent = "..."
241
 
242
+
243
+ print(f"Query for CodeAgent.run: '{current_query_for_agent}' with {len(agent.messages)} history messages.")
244
+ try:
245
+ agent_final_text_response = agent.run(current_query_for_agent)
246
+ # Note: agent.run() is blocking and returns the final string.
247
+ # It won't stream token by token if tools are used.
248
+ yield agent_final_text_response
249
+ print("Completed response generation via CodeAgent.")
250
+ except Exception as e:
251
+ print(f"Error during CodeAgent execution: {e}")
252
+ yield f"Error using tools: {str(e)}"
253
+ return
254
+
255
+ else: # No MCP tools, use original streaming logic
256
+ print("No MCP tools active. Proceeding with direct LLM call (streaming).")
257
+ response_stream_content = ""
258
+ try:
259
+ stream = hf_inference_client.chat_completion(
260
+ model=model_to_use,
261
+ messages=llm_messages,
262
+ stream=True,
263
+ max_tokens=max_tokens, temperature=temperature, top_p=top_p,
264
+ frequency_penalty=frequency_penalty, seed=seed
265
+ )
266
+ for chunk in stream:
267
+ if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
268
+ delta = chunk.choices[0].delta
269
+ if hasattr(delta, 'content') and delta.content:
270
+ token_text = delta.content
271
+ response_stream_content += token_text
272
+ yield response_stream_content
273
+ print("\nCompleted streaming response generation.")
274
+ except Exception as e:
275
+ print(f"Error during direct LLM inference: {e}")
276
+ yield response_stream_content + f"\nError: {str(e)}"
277
+
278
+ # Function to validate provider (remains the same)
279
  def validate_provider(api_key, provider):
280
  if not api_key.strip() and provider != "hf-inference":
281
  return gr.update(value="hf-inference")
282
  return gr.update(value=provider)
283
 
284
  # GRADIO UI
285
  with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
286
  chatbot = gr.Chatbot(
287
+ label="Serverless TextGen Hub",
288
+ height=600, show_copy_button=True,
289
+ placeholder="Select a model, (optionally) load MCP Tools, and begin chatting.",
290
+ layout="panel",
291
+ bubble_full_width=False
292
  )
 
293
 
294
+ msg_input_box = gr.MultimodalTextbox(
 
295
  placeholder="Type a message or upload images...",
296
+ show_label=False, container=False, scale=12,
297
+ file_types=["image"], file_count="multiple", sources=["upload"]
298
  )
299
 
 
300
  with gr.Accordion("Settings", open=False):
301
+ system_message_box = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt")
302
  with gr.Row():
303
+ # ... (max_tokens, temperature, top_p sliders remain the same)
304
+ max_tokens_slider = gr.Slider(1, 4096, value=512, step=1, label="Max tokens")
305
+ temperature_slider = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
306
+ top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P")
307
+ with gr.Row():
308
+ # ... (frequency_penalty, seed sliders remain the same)
309
+ frequency_penalty_slider = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty")
310
+ seed_slider = gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)")
311
+
312
+ providers_list = ["hf-inference", "cerebras", "together", "sambanova", "novita", "cohere", "fireworks-ai", "hyperbolic", "nebius"]
313
+ provider_radio = gr.Radio(choices=providers_list, value="hf-inference", label="Inference Provider")
314
+ byok_textbox = gr.Textbox(label="BYOK (Hugging Face API Key)", type="password", placeholder="Enter token if not using 'hf-inference'")
315
+ custom_model_box = gr.Textbox(label="Custom Model ID", placeholder="org/model-name (overrides selection below)")
316
+ model_search_box = gr.Textbox(label="Filter Featured Models", placeholder="Search...")
317
+
318
+ models_list = [ # Keep your extensive model list
319
+ "meta-llama/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.3-70B-Instruct",
320
+ # ... (include all your models) ...
321
  "microsoft/Phi-3-mini-4k-instruct",
322
  ]
323
+ featured_model_radio = gr.Radio(label="Select a Featured Model", choices=models_list, value="meta-llama/Llama-3.2-11B-Vision-Instruct", interactive=True)
324
+ gr.Markdown("[All Text models](https://huggingface.co/models?pipeline_tag=text-generation) | [All Multimodal models](https://huggingface.co/models?pipeline_tag=image-text-to-text)")
325
 
326
+ # --- MCP Client Settings UI ---
327
+ with gr.Accordion("MCP Client Settings (Connect to External Tools)", open=False):
328
+ gr.Markdown("Configure connections to MCP Servers to allow the LLM to use external tools. The LLM will decide when to use these tools based on your prompts.")
329
+ mcp_server_config_input = gr.Textbox(
330
+ label="MCP Server Configurations (JSON Array)",
331
+ info='Example: [{"name": "MyToolServer", "type": "sse", "url": "http://server_url/gradio_api/mcp/sse"}]',
332
+ lines=3,
333
+ placeholder='Enter a JSON list of server configurations here.',
334
+ value=json.dumps(DEFAULT_MCP_SERVERS, indent=2) # Pre-fill with defaults
335
  )
336
+ mcp_load_status_display = gr.Textbox(label="MCP Load Status", interactive=False)
337
+ load_mcp_tools_btn = gr.Button("Load/Reload MCP Tools")
338
 
339
+ def handle_load_mcp_tools_click(config_str_from_ui):
340
+ if not config_str_from_ui:
341
+ load_mcp_tools([]) # Clear tools if config is empty
342
+ return "MCP tool loading attempted with empty config. Tools cleared."
343
+ try:
344
+ parsed_configs = json.loads(config_str_from_ui)
345
+ if not isinstance(parsed_configs, list):
346
+ return "Error: MCP configuration must be a valid JSON list."
347
+ load_mcp_tools(parsed_configs) # Call the main loading function
348
+
349
+ if mcp_tools_collection and len(mcp_tools_collection.tools) > 0:
350
+ loaded_tool_names = [t.name for t in mcp_tools_collection.tools]
351
+ return f"Successfully loaded {len(loaded_tool_names)} MCP tools: {', '.join(loaded_tool_names)}"
352
+ else:
353
+ return "No MCP tools loaded, or an error occurred during loading. Check console for details."
354
+ except json.JSONDecodeError:
355
+ return "Error: Invalid JSON format in MCP server configurations."
356
+ except Exception as e:
357
+ print(f"Unhandled error in handle_load_mcp_tools_click: {e}")
358
+ return f"Error loading MCP tools: {str(e)}. Check console."
359
 
360
+ load_mcp_tools_btn.click(
361
+ handle_load_mcp_tools_click,
362
+ inputs=[mcp_server_config_input],
363
+ outputs=mcp_load_status_display
364
+ )
365
+ # --- End MCP Client Settings UI ---
366
+
367
+ # Chat history state (remains the same)
368
+ # chat_history = gr.State([]) # Not explicitly used if chatbot manages history directly
369
+
370
+ # Function to filter models (remains the same)
371
  def filter_models(search_term):
372
+ return gr.update(choices=[m for m in models_list if search_term.lower() in m.lower()])
 
 
 
373
 
374
+ # Function to set custom model from radio (remains the same)
375
  def set_custom_model_from_radio(selected):
376
+ return selected # Updates custom_model_box with the selected featured model
 
377
 
378
+ # Gradio's MultimodalTextbox submit action
379
+ # The `user` function is simplified as msg_input_box directly gives text and files
380
+ # The `bot` function is where the main logic of `respond` is called.
381
 
382
+ def handle_submit(msg_content_dict, current_chat_history):
383
+ # msg_content_dict = {"text": "...", "files": ["path1", "path2"]}
384
+ text = msg_content_dict.get("text", "")
385
+ files = msg_content_dict.get("files", [])
386
 
387
+ # Add user message to history for display
388
+ # For multimodal, we might want to display text and images separately or combined
389
+ user_display_entry = []
390
+ if text:
391
+ user_display_entry.append(text)
392
+ if files:
393
+ # For display, Gradio chatbot can render markdown images
394
+ for f_path in files:
395
+ user_display_entry.append(f"![{os.path.basename(f_path)}]({f_path})")
396
+
397
+ # Construct a representation for history that `respond` can unpack
398
+ # For simplicity, let's pass text and files separately to `respond`
399
+ # and the history will store the user input as (text, files_list_for_display)
400
+
401
+ history_entry_user_part = (text, files) # Store as tuple for `respond` to process easily later
402
+ current_chat_history.append([history_entry_user_part, None]) # Add user part, assistant is None for now
403
+
404
+ # Prepare for streaming response
405
+ # The `respond` function is a generator
406
+ assistant_response_accumulator = ""
407
+ for streamed_chunk in respond(
408
+ text, files,
409
+ current_chat_history[:-1], # Pass history *before* current turn
410
+ system_message_box.value, max_tokens_slider.value, temperature_slider.value,
411
+ top_p_slider.value, frequency_penalty_slider.value, seed_slider.value,
412
+ provider_radio.value, byok_textbox.value, custom_model_box.value,
413
+ model_search_box.value, featured_model_radio.value
414
+ ):
415
+ assistant_response_accumulator = streamed_chunk
416
+ current_chat_history[-1][1] = assistant_response_accumulator # Update last assistant message
417
+ yield current_chat_history, {"text": "", "files": []} # Update chatbot, clear input
418
+
419
+ # Final update after stream (already done by last yield)
420
+ # yield current_chat_history, {"text": "", "files": []}
421
 
422
+
423
+ msg_input_box.submit(
424
+ handle_submit,
425
+ [msg_input_box, chatbot],
426
+ [chatbot, msg_input_box] # Output to chatbot and clear msg_input_box
427
  )
 
428
 
429
+ model_search_box.change(filter_models, model_search_box, featured_model_radio)
430
+ featured_model_radio.change(set_custom_model_from_radio, featured_model_radio, custom_model_box)
431
+ byok_textbox.change(validate_provider, [byok_textbox, provider_radio], provider_radio)
432
+ provider_radio.change(validate_provider, [byok_textbox, provider_radio], provider_radio)
 
 
433
 
434
+ # Load default MCP tools on startup
435
+ load_mcp_tools(DEFAULT_MCP_SERVERS)
436
+ print(f"Initial MCP tools loaded: {len(mcp_tools_collection.tools) if mcp_tools_collection else 0} tools.")
437
 
438
+ print("Gradio interface initialized.")
439
  if __name__ == "__main__":
440
+ print("Launching the Serverless TextGen Hub demo application.")
441
+ demo.launch(show_api=False) # show_api can be True if needed for other purposes
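
For reference, below is a minimal standalone sketch of the MCP connection pattern this commit adopts. It mirrors the calls as written in app.py (`MCPClient(server_parameters=...)`, `get_tools()`, `ToolCollection`, `CodeAgent`), assumes `smolagents[mcp]` and `mcp` are installed, and assumes the example SSE endpoint from `DEFAULT_MCP_SERVERS` is reachable; it is a sketch of this commit's usage, not a canonical smolagents reference.

```python
# Sketch only: mirrors the MCP client usage in this commit's app.py.
from smolagents import ToolCollection, CodeAgent  # CodeAgent referenced in the commented agent step below
from smolagents.mcp_client import MCPClient

SSE_URL = "https://fdaudens-kokoro-mcp.hf.space/gradio_api/mcp/sse"  # example server from DEFAULT_MCP_SERVERS

client = MCPClient(server_parameters={"url": SSE_URL})    # connect to the SSE MCP server
try:
    tools = list(client.get_tools())                       # discover the server's tools
    print(f"Discovered {len(tools)} tools: {[t.name for t in tools]}")

    collection = ToolCollection(tools=tools)               # same container app.py keeps globally
    # app.py then wraps its InferenceClient in an adapter (HFClientWrapperForAgent)
    # and builds the agent like this whenever MCP tools are present:
    # agent = CodeAgent(tools=list(collection.tools), model=agent_model_adapter)
    # reply = agent.run("Convert 'hello' to speech if a TTS tool is available.")
finally:
    client.close()                                         # app.py registers this cleanup via atexit
```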