Nymbo committed · verified · Commit d2ae72a · 1 Parent(s): a82c008

Update app.py

Files changed (1)
  1. app.py +18 -324
app.py CHANGED
@@ -5,8 +5,6 @@ import json
 import base64
 from PIL import Image
 import io
-import requests
-from smolagents.mcp_client import MCPClient

 ACCESS_TOKEN = os.getenv("HF_TOKEN")
 print("Access token loaded.")
@@ -41,154 +39,9 @@ def encode_image(image_path):
         print(f"Error encoding image: {e}")
         return None

-# Dictionary to store active MCP connections
-mcp_connections = {}
-
-def connect_to_mcp_server(server_url, server_name=None):
-    """Connect to an MCP server and return available tools"""
-    if not server_url:
-        return None, "No server URL provided"
-
-    try:
-        # Create an MCP client and connect to the server
-        client = MCPClient({"url": server_url})
-        # Get available tools
-        tools = client.get_tools()
-
-        # Store the connection for later use
-        name = server_name or f"Server_{len(mcp_connections)}"
-        mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}
-
-        return name, f"Successfully connected to {name} with {len(tools)} available tools"
-    except Exception as e:
-        print(f"Error connecting to MCP server: {e}")
-        return None, f"Error connecting to MCP server: {str(e)}"
-
-def list_mcp_tools(server_name):
-    """List available tools for a connected MCP server"""
-    if server_name not in mcp_connections:
-        return "Server not connected"
-
-    tools = mcp_connections[server_name]["tools"]
-    tool_info = []
-    for tool in tools:
-        tool_info.append(f"- {tool.name}: {tool.description}")
-
-    if not tool_info:
-        return "No tools available for this server"
-
-    return "\n".join(tool_info)
-
-def call_mcp_tool(server_name, tool_name, **kwargs):
-    """Call a specific tool from an MCP server"""
-    if server_name not in mcp_connections:
-        return f"Server '{server_name}' not connected"
-
-    client = mcp_connections[server_name]["client"]
-    tools = mcp_connections[server_name]["tools"]
-
-    # Find the requested tool
-    tool = next((t for t in tools if t.name == tool_name), None)
-    if not tool:
-        return f"Tool '{tool_name}' not found on server '{server_name}'"
-
-    try:
-        # Call the tool with provided arguments
-        result = client.call_tool(tool_name, kwargs)
-        return result
-    except Exception as e:
-        print(f"Error calling MCP tool: {e}")
-        return f"Error calling MCP tool: {str(e)}"
-
-def analyze_message_for_tool_call(message, active_mcp_servers, client, model_to_use, system_message):
-    """Analyze a message to determine if an MCP tool should be called"""
-    # Skip analysis if message is empty
-    if not message or not message.strip():
-        return None, None
-
-    # Get information about available tools
-    tool_info = []
-    for server_name in active_mcp_servers:
-        if server_name in mcp_connections:
-            server_tools = mcp_connections[server_name]["tools"]
-            for tool in server_tools:
-                tool_info.append({
-                    "server_name": server_name,
-                    "tool_name": tool.name,
-                    "description": tool.description
-                })
-
-    if not tool_info:
-        return None, None
-
-    # Create a structured query for the LLM to analyze if a tool call is needed
-    tools_desc = []
-    for info in tool_info:
-        tools_desc.append(f"{info['server_name']}.{info['tool_name']}: {info['description']}")
-
-    tools_string = "\n".join(tools_desc)
-
-    analysis_system_prompt = f"""You are an assistant that helps determine if a user message requires using an external tool.
-Available tools:
-{tools_string}
-
-Your job is to:
-1. Analyze the user's message
-2. Determine if they're asking to use one of the tools
-3. If yes, respond with a JSON object with the server_name, tool_name, and parameters
-4. If no, respond with "NO_TOOL_NEEDED"
-
-Example 1:
-User: "Please turn this text into speech: Hello world"
-Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio", "parameters": {{"text": "Hello world", "speed": 1.0}}}}
-
-Example 2:
-User: "What is the capital of France?"
-Response: NO_TOOL_NEEDED"""
-
-    try:
-        # Call the LLM to analyze the message
-        response = client.chat_completion(
-            model=model_to_use,
-            messages=[
-                {"role": "system", "content": analysis_system_prompt},
-                {"role": "user", "content": message}
-            ],
-            temperature=0.2,  # Low temperature for more deterministic responses
-            max_tokens=300
-        )
-
-        analysis = response.choices[0].message.content
-        print(f"Tool analysis: {analysis}")
-
-        if "NO_TOOL_NEEDED" in analysis:
-            return None, None
-
-        # Try to extract JSON from the response
-        json_start = analysis.find("{")
-        json_end = analysis.rfind("}") + 1
-
-        if json_start < 0 or json_end <= 0:
-            return None, None
-
-        json_str = analysis[json_start:json_end]
-        try:
-            tool_call = json.loads(json_str)
-            return tool_call.get("server_name"), {
-                "tool_name": tool_call.get("tool_name"),
-                "parameters": tool_call.get("parameters", {})
-            }
-        except json.JSONDecodeError:
-            print(f"Failed to parse tool call JSON: {json_str}")
-            return None, None
-
-    except Exception as e:
-        print(f"Error analyzing message for tool calls: {str(e)}")
-        return None, None
-
 def respond(
     message,
-    image_files,
+    image_files,  # Changed parameter name and structure
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
@@ -200,10 +53,7 @@ def respond(
     custom_api_key,
     custom_model,
     model_search_term,
-    selected_model,
-    mcp_enabled=False,
-    active_mcp_servers=None,
-    mcp_interaction_mode="Natural Language"
+    selected_model
 ):
     print(f"Received message: {message}")
     print(f"Received {len(image_files) if image_files else 0} images")
@@ -216,9 +66,6 @@ def respond(
     print(f"Selected model (custom_model): {custom_model}")
     print(f"Model search term: {model_search_term}")
     print(f"Selected model from radio: {selected_model}")
-    print(f"MCP enabled: {mcp_enabled}")
-    print(f"Active MCP servers: {active_mcp_servers}")
-    print(f"MCP interaction mode: {mcp_interaction_mode}")

     # Determine which token to use
     token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
@@ -235,58 +82,6 @@ def respond(
     # Convert seed to None if -1 (meaning random)
     if seed == -1:
         seed = None
-
-    # Determine which model to use
-    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
-    print(f"Model selected for inference: {model_to_use}")
-
-    # Process MCP commands in command mode
-    if mcp_enabled and message:
-        if message.startswith("/mcp"):  # Always handle explicit commands
-            # Handle MCP command
-            command_parts = message.split(" ", 3)
-            if len(command_parts) < 3:
-                return "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments]"
-
-            _, server_name, tool_name = command_parts[:3]
-            args_json = "{}" if len(command_parts) < 4 else command_parts[3]
-
-            try:
-                args_dict = json.loads(args_json)
-                result = call_mcp_tool(server_name, tool_name, **args_dict)
-                if isinstance(result, dict):
-                    return json.dumps(result, indent=2)
-                return str(result)
-            except json.JSONDecodeError:
-                return f"Invalid JSON arguments: {args_json}"
-            except Exception as e:
-                return f"Error executing MCP command: {str(e)}"
-        elif mcp_interaction_mode == "Natural Language" and active_mcp_servers:
-            # Use natural language processing to detect tool calls
-            server_name, tool_info = analyze_message_for_tool_call(
-                message,
-                active_mcp_servers,
-                client,
-                model_to_use,
-                system_message
-            )
-
-            if server_name and tool_info:
-                try:
-                    # Call the detected tool
-                    print(f"Calling tool via natural language: {server_name}.{tool_info['tool_name']} with parameters: {tool_info['parameters']}")
-                    result = call_mcp_tool(server_name, tool_info['tool_name'], **tool_info['parameters'])
-
-                    # Format the response to include what was done
-                    if isinstance(result, dict):
-                        result_str = json.dumps(result, indent=2)
-                    else:
-                        result_str = str(result)
-
-                    return f"I used the {tool_info['tool_name']} tool from {server_name} with your request.\n\nResult:\n{result_str}"
-                except Exception as e:
-                    print(f"Error executing MCP tool via natural language: {str(e)}")
-                    # Continue with normal response if tool call fails

     # Create multimodal content if images are present
     if image_files and len(image_files) > 0:
@@ -319,25 +114,8 @@ def respond(
         # Text-only message
         user_content = message

-    # Add information about available MCP tools to the system message if MCP is enabled
-    augmented_system_message = system_message
-    if mcp_enabled and active_mcp_servers:
-        tool_info = []
-        for server_name in active_mcp_servers:
-            if server_name in mcp_connections:
-                server_tools = list_mcp_tools(server_name).split("\n")
-                tool_info.extend([f"{server_name}: {tool}" for tool in server_tools])
-
-        if tool_info:
-            mcp_tools_description = "\n".join(tool_info)
-
-            if mcp_interaction_mode == "Command Mode":
-                augmented_system_message += f"\n\nYou have access to the following MCP tools:\n{mcp_tools_description}\n\nTo use these tools, the user can type a command in the format: /mcp <server_name> <tool_name> <arguments_json>"
-            else:
-                augmented_system_message += f"\n\nYou have access to the following MCP tools:\n{mcp_tools_description}\n\nThe user can use these tools by describing what they want in natural language, and the system will automatically detect when to use a tool based on their request."
-
     # Prepare messages in the format expected by the API
-    messages = [{"role": "system", "content": augmented_system_message}]
+    messages = [{"role": "system", "content": system_message}]
     print("Initial messages array constructed.")

     # Add conversation history to the context
@@ -433,13 +211,19 @@ def respond(

     print("Completed response generation.")

+# Function to validate provider selection based on BYOK
+def validate_provider(api_key, provider):
+    if not api_key.strip() and provider != "hf-inference":
+        return gr.update(value="hf-inference")
+    return gr.update(value=provider)
+
 # GRADIO UI
 with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
     # Create the chatbot component
     chatbot = gr.Chatbot(
         height=600,
         show_copy_button=True,
-        placeholder="Select a model and begin chatting. Now supports multiple inference providers, multimodal inputs, and MCP tools",
+        placeholder="Select a model and begin chatting. Now supports multiple inference providers and multimodal inputs",
         layout="panel"
     )
     print("Chatbot interface created.")
@@ -455,6 +239,8 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         sources=["upload"]
     )

+    # Note: We're removing the separate submit button since MultimodalTextbox has its own
+
     # Create accordion for settings
     with gr.Accordion("Settings", open=False):
         # System message
@@ -588,69 +374,6 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:

     gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")

-    # Create accordion for MCP settings
-    with gr.Accordion("MCP Settings", open=False):
-        mcp_enabled_checkbox = gr.Checkbox(
-            label="Enable MCP Support",
-            value=False,
-            info="Enable Model Context Protocol support to connect to external tools and services"
-        )
-
-        with gr.Row():
-            mcp_server_url = gr.Textbox(
-                label="MCP Server URL",
-                placeholder="https://example-mcp-server.hf.space/gradio_api/mcp/sse",
-                info="URL of the MCP server to connect to"
-            )
-
-            mcp_server_name = gr.Textbox(
-                label="Server Name",
-                placeholder="Optional name for this server",
-                info="A friendly name to identify this server"
-            )
-
-        mcp_connect_button = gr.Button("Connect to MCP Server")
-
-        mcp_status = gr.Textbox(
-            label="MCP Connection Status",
-            placeholder="No MCP servers connected",
-            interactive=False
-        )
-
-        active_mcp_servers = gr.Dropdown(
-            label="Active MCP Servers",
-            choices=[],
-            multiselect=True,
-            info="Select which MCP servers to use in chat"
-        )
-
-        mcp_mode = gr.Radio(
-            label="MCP Interaction Mode",
-            choices=["Natural Language", "Command Mode"],
-            value="Natural Language",
-            info="Choose how to interact with MCP tools"
-        )
-
-        gr.Markdown("""
-        ### MCP Interaction Modes
-
-        **Natural Language Mode**: Simply describe what you want in plain English. Examples:
-        ```
-        Please convert the text "Hello world" to speech
-        Can you read this text aloud: "Welcome to MCP integration"
-        ```
-
-        **Command Mode**: Use structured commands (for advanced users)
-        ```
-        /mcp <server_name> <tool_name> {"param1": "value1", "param2": "value2"}
-        ```
-
-        Example:
-        ```
-        /mcp kokoroTTS text_to_audio {"text": "Hello world", "speed": 1.0}
-        ```
-        """)
-
     # Chat history state
     chat_history = gr.State([])

@@ -666,16 +389,6 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         print(f"Featured model selected: {selected}")
         return selected

-    # Function to connect to MCP server
-    def connect_mcp_server(url, name):
-        server_name, status = connect_to_mcp_server(url, name)
-
-        # Update the active servers dropdown
-        servers = list(mcp_connections.keys())
-
-        # Return the status message and updated server list
-        return status, gr.update(choices=servers)
-
     # Function for the chat interface
     def user(user_message, history):
         # Debug logging for troubleshooting
@@ -721,7 +434,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         return history

     # Define bot response function
-    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model, mcp_enabled, selected_servers, mcp_interaction_mode):
+    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
         # Check if history is valid
         if not history or len(history) == 0:
             print("No history to process")
@@ -772,10 +485,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
             api_key,
             custom_model,
             search_term,
-            selected_model,
-            mcp_enabled,
-            selected_servers,
-            mcp_interaction_mode
+            selected_model
         ):
             history[-1][1] = response
             yield history
@@ -795,21 +505,12 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
             api_key,
             custom_model,
             search_term,
-            selected_model,
-            mcp_enabled,
-            selected_servers,
-            mcp_interaction_mode
+            selected_model
         ):
             history[-1][1] = response
             yield history

-    # Update function for provider validation based on BYOK
-    def validate_provider(api_key, provider):
-        if not api_key.strip() and provider != "hf-inference":
-            return gr.update(value="hf-inference")
-        return gr.update(value=provider)
-
-    # Event handlers
+    # Event handlers - only using the MultimodalTextbox's built-in submit functionality
     msg.submit(
         user,
         [msg, chatbot],
@@ -819,7 +520,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         bot,
         [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
          frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
-         model_search_box, featured_model_radio, mcp_enabled_checkbox, active_mcp_servers, mcp_mode],
+         model_search_box, featured_model_radio],
         [chatbot]
     ).then(
         lambda: {"text": "", "files": []},  # Clear inputs after submission
@@ -827,13 +528,6 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         [msg]
     )

-    # Connect MCP connect button
-    mcp_connect_button.click(
-        connect_mcp_server,
-        [mcp_server_url, mcp_server_name],
-        [mcp_status, active_mcp_servers]
-    )
-
     # Connect the model filter to update the radio choices
     model_search_box.change(
         fn=filter_models,
@@ -870,4 +564,4 @@ print("Gradio interface initialized.")

 if __name__ == "__main__":
     print("Launching the demo application.")
-    demo.launch(show_api=True, mcp_server=False)  # Not launching as MCP server as we're the client
+    demo.launch(show_api=True)
 