Update app.py
app.py
CHANGED
@@ -5,8 +5,6 @@ import json
 import base64
 from PIL import Image
 import io
-import requests  # Retained, though not directly used in the core logic shown for modification
-from smolagents.mcp_client import MCPClient
 
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
 print("Access token loaded.")
@@ -41,174 +39,9 @@ def encode_image(image_path):
         print(f"Error encoding image: {e}")
         return None
 
-# Dictionary to store active MCP connections
-mcp_connections = {}
-
-def connect_to_mcp_server(server_url, server_name=None):
-    """Connect to an MCP server and return available tools"""
-    if not server_url:
-        return None, "No server URL provided"
-
-    try:
-        # Create an MCP client and connect to the server
-        client = MCPClient({"url": server_url})
-        # Get available tools
-        tools = client.get_tools()
-
-        # Store the connection for later use
-        name = server_name or f"Server_{len(mcp_connections)}_{base64.urlsafe_b64encode(os.urandom(3)).decode()}"  # Ensure unique name
-        mcp_connections[name] = {"client": client, "tools": tools, "url": server_url}
-
-        return name, f"Successfully connected to {name} with {len(tools)} available tools"
-    except Exception as e:
-        print(f"Error connecting to MCP server: {e}")
-        return None, f"Error connecting to MCP server: {str(e)}"
-
-def list_mcp_tools(server_name):
-    """List available tools for a connected MCP server"""
-    if server_name not in mcp_connections:
-        return "Server not connected"
-
-    tools = mcp_connections[server_name]["tools"]
-    tool_info = []
-    for tool in tools:
-        tool_info.append(f"- {tool.name}: {tool.description}")
-
-    if not tool_info:
-        return "No tools available for this server"
-
-    return "\n".join(tool_info)
-
-def call_mcp_tool(server_name, tool_name, **kwargs):
-    """Call a specific tool from an MCP server"""
-    if server_name not in mcp_connections:
-        return f"Server '{server_name}' not connected"
-
-    client = mcp_connections[server_name]["client"]
-    tools = mcp_connections[server_name]["tools"]
-
-    # Find the requested tool
-    tool = next((t for t in tools if t.name == tool_name), None)
-    if not tool:
-        return f"Tool '{tool_name}' not found on server '{server_name}'"
-
-    try:
-        # Call the tool with provided arguments
-        # The mcp_client's call_tool is expected to return the direct result from the tool
-        result = client.call_tool(tool_name, kwargs)
-
-        # The result here could be a string (e.g. base64 audio), a dict, or other types
-        # depending on the MCP tool. The `respond` function will handle formatting.
-        return result
-    except Exception as e:
-        print(f"Error calling MCP tool: {e}")
-        return f"Error calling MCP tool: {str(e)}"
-
-def analyze_message_for_tool_call(message, active_mcp_servers, client_for_llm, model_to_use, system_message_for_llm):
-    """Analyze a message to determine if an MCP tool should be called"""
-    # Skip analysis if message is empty
-    if not message or not message.strip():
-        return None, None
-
-    # Get information about available tools
-    tool_info = []
-    if active_mcp_servers:
-        for server_name in active_mcp_servers:
-            if server_name in mcp_connections:
-                server_tools = mcp_connections[server_name]["tools"]
-                for tool in server_tools:
-                    tool_info.append({
-                        "server_name": server_name,
-                        "tool_name": tool.name,
-                        "description": tool.description
-                    })
-
-    if not tool_info:
-        return None, None
-
-    # Create a structured query for the LLM to analyze if a tool call is needed
-    tools_desc = []
-    for info in tool_info:
-        tools_desc.append(f"{info['server_name']}.{info['tool_name']}: {info['description']}")
-
-    tools_string = "\n".join(tools_desc)
-
-    # Updated prompt to guide LLM for TTS tool that returns base64
-    analysis_system_prompt = f"""You are an assistant that helps determine if a user message requires using an external tool.
-Available tools:
-{tools_string}
-
-Your job is to:
-1. Analyze the user's message.
-2. Determine if they're asking to use one of the tools.
-3. If yes, respond ONLY with a JSON object with "server_name", "tool_name", and "parameters".
-4. If no, respond ONLY with the exact string "NO_TOOL_NEEDED".
-
-Example 1 (for TTS that returns base64 audio):
-User: "Please turn this text into speech: Hello world"
-Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio_b64", "parameters": {{"text": "Hello world", "speed": 1.0}}}}
-
-Example 2 (for TTS with different speed):
-User: "Read 'This is faster' at speed 1.5"
-Response: {{"server_name": "kokoroTTS", "tool_name": "text_to_audio_b64", "parameters": {{"text": "This is faster", "speed": 1.5}}}}
-
-Example 3 (general, non-tool):
-User: "What is the capital of France?"
-Response: NO_TOOL_NEEDED"""
-
-    try:
-        # Call the LLM to analyze the message
-        response = client_for_llm.chat_completion(
-            model=model_to_use,
-            messages=[
-                {"role": "system", "content": analysis_system_prompt},
-                {"role": "user", "content": message}
-            ],
-            temperature=0.1,  # Low temperature for deterministic tool selection
-            max_tokens=300
-        )
-
-        analysis = response.choices[0].message.content.strip()
-        print(f"Tool analysis raw response: '{analysis}'")
-
-        if analysis == "NO_TOOL_NEEDED":
-            return None, None
-
-        # Try to parse JSON directly from the response
-        try:
-            tool_call = json.loads(analysis)
-            return tool_call.get("server_name"), {
-                "tool_name": tool_call.get("tool_name"),
-                "parameters": tool_call.get("parameters", {})
-            }
-        except json.JSONDecodeError:
-            print(f"Failed to parse tool call JSON directly from: {analysis}")
-            # Fallback to extracting JSON if not a direct JSON response
-            json_start = analysis.find("{")
-            json_end = analysis.rfind("}") + 1
-
-            if json_start != -1 and json_end != 0 and json_end > json_start:
-                json_str = analysis[json_start:json_end]
-                try:
-                    tool_call = json.loads(json_str)
-                    return tool_call.get("server_name"), {
-                        "tool_name": tool_call.get("tool_name"),
-                        "parameters": tool_call.get("parameters", {})
-                    }
-                except json.JSONDecodeError:
-                    print(f"Failed to parse extracted tool call JSON: {json_str}")
-                    return None, None
-            else:
-                print(f"No JSON object found in analysis: {analysis}")
-                return None, None
-
-    except Exception as e:
-        print(f"Error analyzing message for tool calls: {str(e)}")
-        return None, None
-
 def respond(
     message,
-    image_files,
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
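For reference, the helpers deleted in this hunk were meant to be chained roughly as follows. This is a minimal sketch of the removed flow, not part of the diff; the server URL is a placeholder, and the kokoroTTS tool name is taken from the example in the removed analysis prompt.

    # Sketch only: connect to a server, list its tools, then call one (URL and names are illustrative).
    name, status = connect_to_mcp_server("https://example.com/mcp", "kokoroTTS")
    print(status)                # e.g. "Successfully connected to kokoroTTS with N available tools"
    print(list_mcp_tools(name))  # one "- tool_name: description" line per tool
    audio_b64 = call_mcp_tool(name, "text_to_audio_b64", text="Hello world", speed=1.0)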
@@ -220,14 +53,11 @@ def respond(
     custom_api_key,
     custom_model,
     model_search_term,
-    selected_model,
-    mcp_enabled=False,
-    active_mcp_servers=None,
-    mcp_interaction_mode="Natural Language"
 ):
     print(f"Received message: {message}")
     print(f"Received {len(image_files) if image_files else 0} images")
-
     print(f"System message: {system_message}")
     print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
     print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
@@ -236,10 +66,8 @@ def respond(
     print(f"Selected model (custom_model): {custom_model}")
     print(f"Model search term: {model_search_term}")
     print(f"Selected model from radio: {selected_model}")
-    print(f"MCP enabled: {mcp_enabled}")
-    print(f"Active MCP servers: {active_mcp_servers}")
-    print(f"MCP interaction mode: {mcp_interaction_mode}")
 
     token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
 
     if custom_api_key.strip() != "":
@@ -247,160 +75,101 @@ def respond(
     else:
         print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
 
-
     print(f"Hugging Face Inference Client initialized with {provider} provider.")
 
     if seed == -1:
         seed = None
-
-    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
-    print(f"Model selected for inference: {model_to_use}")
 
-    if mcp_enabled:
-        if message.startswith("/mcp"):
-            command_parts = message.split(" ", 3)
-            if len(command_parts) < 3:
-                yield "Invalid MCP command. Format: /mcp <server_name> <tool_name> [arguments_json]"
-                return
-
-            _, server_name, tool_name = command_parts[:3]
-            args_json_str = "{}" if len(command_parts) < 4 else command_parts[3]
-
-            try:
-                args_dict = json.loads(args_json_str)
-                result = call_mcp_tool(server_name, tool_name, **args_dict)
-
-                if "audio" in tool_name.lower() and "b64" in tool_name.lower() and isinstance(result, str):
-                    audio_html = f'<audio controls src="data:audio/wav;base64,{result}"></audio>'
-                    yield f"Executed {tool_name} from {server_name}.\n\nResult:\n{audio_html}"
-                elif isinstance(result, dict):
-                    yield json.dumps(result, indent=2)
-                else:
-                    yield str(result)
-                return  # MCP command handled, exit
-            except json.JSONDecodeError:
-                yield f"Invalid JSON arguments: {args_json_str}"
-                return
-            except Exception as e:
-                yield f"Error executing MCP command: {str(e)}"
-                return
-        elif mcp_interaction_mode == "Natural Language" and active_mcp_servers:
-            server_name, tool_info = analyze_message_for_tool_call(
-                message,
-                active_mcp_servers,
-                client_for_llm,
-                model_to_use,
-                system_message  # Original system message for context, LLM uses its own for analysis
-            )
-
-            if server_name and tool_info and tool_info.get("tool_name"):
-                try:
-                    print(f"Calling tool via natural language: {server_name}.{tool_info['tool_name']} with parameters: {tool_info.get('parameters', {})}")
-                    result = call_mcp_tool(server_name, tool_info['tool_name'], **tool_info.get('parameters', {}))
-
-                    tool_display_name = tool_info['tool_name']
-                    if "audio" in tool_display_name.lower() and "b64" in tool_display_name.lower() and isinstance(result, str) and len(result) > 100:  # Heuristic for base64 audio
-                        audio_html = f'<audio controls src="data:audio/wav;base64,{result}"></audio>'
-                        yield f"I used the {tool_display_name} tool from {server_name} with your request.\n\nResult:\n{audio_html}"
-                    elif isinstance(result, dict):
-                        result_str = json.dumps(result, indent=2)
-                        yield f"I used the {tool_display_name} tool from {server_name} with your request.\n\nResult:\n{result_str}"
-                    else:
-                        result_str = str(result)
-                        yield f"I used the {tool_display_name} tool from {server_name} with your request.\n\nResult:\n{result_str}"
-                    return  # MCP tool call handled via natural language
-                except Exception as e:
-                    print(f"Error executing MCP tool via natural language: {str(e)}")
-                    yield f"I tried to use a tool but encountered an error: {str(e)}. I will try to respond without it."
-                    # Fall through to normal LLM response if tool call fails
-
-    user_content = []
-    if message and message.strip():
-        user_content.append({"type": "text", "text": message})
-
     if image_files and len(image_files) > 0:
         try:
-            encoded_image = encode_image(
             if encoded_image:
                 user_content.append({
                     "type": "image_url",
-                    "image_url": {
                 })
         except Exception as e:
-            print(f"Error encoding image
-
-        yield ""  # Or handle appropriately, maybe return if no content
-        return
-
-    augmented_system_message = system_message
-    if mcp_enabled and active_mcp_servers:
-        tool_desc_list = []
-        for server_name_active in active_mcp_servers:
-            if server_name_active in mcp_connections:
-                # Get tools for this specific server
-                # Assuming list_mcp_tools returns a string like "- tool1: desc1\n- tool2: desc2"
-                server_tools_str = list_mcp_tools(server_name_active)
-                if server_tools_str != "Server not connected" and server_tools_str != "No tools available for this server":
-                    for line in server_tools_str.split('\n'):
-                        if line.startswith("- "):
-                            tool_desc_list.append(f"{server_name_active}.{line[2:]}")  # e.g., kokoroTTS.text_to_audio_b64: Convert text...
-
-        if tool_desc_list:
-            mcp_tools_description_for_llm = "\n".join(tool_desc_list)
-
-            # This informs the main LLM about available tools for general conversation,
-            # distinct from the specialized analyzer LLM.
-            # The main LLM doesn't call tools directly but can use this info to guide the user.
-            if mcp_interaction_mode == "Command Mode":
-                augmented_system_message += f"\n\nYou have access to the following MCP tools which the user can invoke:\n{mcp_tools_description_for_llm}\n\nTo use these tools, the user can type a command in the format: /mcp <server_name> <tool_name> <arguments_json>"
-            else:  # Natural Language
-                augmented_system_message += f"\n\nYou have access to the following MCP tools. The system will try to use them automatically if the user's request matches their capability:\n{mcp_tools_description_for_llm}\n\nIf the user asks to do something a tool can do, the system will attempt to use it. For example, if a 'text_to_audio_b64' tool is available, and the user says 'read this text aloud', the system will try to use that tool."
 
     print("Initial messages array constructed.")
 
     print(f"Latest user message appended (content type: {type(user_content)})")
-    # print(f"Messages for LLM: {json.dumps(messages_for_llm, indent=2)}")  # Very verbose
 
     parameters = {
         "max_tokens": max_tokens,
         "temperature": temperature,
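The removed command path above expects chat messages of the form /mcp <server_name> <tool_name> [arguments_json]. A minimal sketch of how such a message would be split, using the kokoroTTS tool named in the removed analysis prompt (server name and arguments are illustrative only):

    message = '/mcp kokoroTTS text_to_audio_b64 {"text": "Hello world", "speed": 1.0}'
    command_parts = message.split(" ", 3)        # ['/mcp', 'kokoroTTS', 'text_to_audio_b64', '{"text": ...}']
    _, server_name, tool_name = command_parts[:3]
    args_dict = json.loads(command_parts[3])     # {'text': 'Hello world', 'speed': 1.0}
    result = call_mcp_tool(server_name, tool_name, **args_dict)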
@@ -411,273 +180,388 @@ def respond(
     if seed is not None:
         parameters["seed"] = seed
 
     try:
-        stream = client_for_llm.chat_completion(
             model=model_to_use,
-            messages=messages_for_llm,
             stream=True,
             **parameters
         )
 
         for chunk in stream:
             if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
                 if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                     token_text = chunk.choices[0].delta.content
                     if token_text:
                         print(token_text, end="", flush=True)
     except Exception as e:
-        print(f"Error during
 
 # GRADIO UI
 with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
     chatbot = gr.Chatbot(
         height=600,
         show_copy_button=True,
-        placeholder="Select a model and begin chatting. Now supports multiple inference providers
-        layout="panel",
-        show_label=False,
-        render=False  # Delay rendering
     )
     print("Chatbot interface created.")
 
-    # Render chatbot and message box after defining them
-    chatbot.render()
-    msg.render()
 
     with gr.Accordion("Settings", open=False):
         system_message_box = gr.Textbox(
             value="You are a helpful AI assistant that can understand images and text.",
             placeholder="You are a helpful assistant.",
             label="System Prompt"
         )
 
         with gr.Row():
             with gr.Column():
-                max_tokens_slider = gr.Slider(
             with gr.Column():
-                frequency_penalty_slider = gr.Slider(
 
         models_list = [
-            "meta-llama/Llama-3.2-11B-Vision-Instruct",
-            "meta-llama/Llama-3-70B-Instruct",
 
-    def filter_models_choices(search_term):
         print(f"Filtering models with search term: {search_term}")
-        if not search_term: return gr.update(choices=models_list)
        filtered = [m for m in models_list if search_term.lower() in m.lower()]
         print(f"Filtered models: {filtered}")
-        return gr.update(choices=filtered)
-
-    def update_custom_model_from_radio(selected_featured_model):
-        print(f"Featured model selected: {selected_featured_model}")
-        # This function now updates the custom_model_box.
-        # If you want the radio selection to BE the model_to_use unless custom_model_box has text,
-        # then custom_model_box should be cleared or its value used as override.
-        # For now, let's assume custom_model_box is an override.
-        # If you want the radio to directly feed into the selected_model parameter for respond(),
-        # then this function might not be needed or custom_model_box should be used as an override.
-        return selected_featured_model  # This updates the custom_model_box with the radio selection.
-
-    def handle_connect_mcp_server(url, name_suggestion):
-        actual_name, status_msg = connect_to_mcp_server(url, name_suggestion)
-        all_server_names = list(mcp_connections.keys())
-        # Keep existing selections if possible
-        current_selection = active_mcp_servers.value if active_mcp_servers.value else []
-        new_selection = [s for s in current_selection if s in all_server_names]
-        if actual_name and actual_name not in new_selection:  # Auto-select newly connected server
-            new_selection.append(actual_name)
-        return status_msg, gr.update(choices=all_server_names, value=new_selection)
 
-        files = user_input_dict.get("files", [])  # List of file paths
 
-        if files:
-            for file_path in files:
-                visual_history_additions.append([(file_path,), None])  # Gradio Chatbot expects tuple for files
-
-        return visual_history_additions, current_chat_history_state
-
-    # This function is called after user message is processed.
-    # It calls the LLM and streams the response.
-    def handle_bot_response(
-        current_chat_history_state,  # This is the state with the latest user message
-        sys_msg, max_tok, temp, top_p_val, freq_pen, seed_val, prov, api_key_val, cust_model,
-        search, selected_feat_model, mcp_on, active_servs, mcp_interact_mode
-    ):
-        if not current_chat_history_state or current_chat_history_state[-1][1] is not None:
-            # User message not yet added or bot already responded
-            yield current_chat_history_state  # Or some empty update
-            return
-
-        # The user message is the first element of the last item in chat_history_state
-        # It's a dict: {'text': '...', 'files': ['path1', ...]}
-        user_message_dict = current_chat_history_state[-1][0]
 
-            top_p=top_p_val,
-            frequency_penalty=freq_pen,
-            seed=seed_val,
-            provider=prov,
-            custom_api_key=api_key_val,
-            custom_model=cust_model,
-            model_search_term=search,  # This might be redundant if featured_model_radio directly updates custom_model_box
-            selected_model=selected_feat_model,  # This is the value from the radio
-            mcp_enabled=mcp_on,
-            active_mcp_servers=active_servs,
-            mcp_interaction_mode=mcp_interact_mode
-        ):
-            full_response = R
-            # Update the last item in chat_history_state with bot's response
-            current_chat_history_state[-1][1] = full_response
 
-            user_files_viz = user_turn.get("files", [])
-            if user_text_viz:
-                visual_history_update.append([user_text_viz, None if bot_turn is None and user_turn == current_chat_history_state[-1][0] else bot_turn])  # Add text part
-            for f_path in user_files_viz:
-                visual_history_update.append([(f_path,), None if bot_turn is None and user_turn == current_chat_history_state[-1][0] else bot_turn])  # Add image part
-            # Bot turn processing if user turn was only text and no files
-            if not user_text_viz and not user_files_viz and user_text_viz == "":  # Should not happen with current logic
-                visual_history_update.append(["", bot_turn])
-            elif not user_files_viz and user_text_viz and bot_turn is not None and visual_history_update[-1][0] == user_text_viz:
-                visual_history_update[-1][1] = bot_turn  # Assign bot response to the text part
 
     msg.submit(
     ).then(
         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
-        model_search_box, featured_model_radio
     ).then(
         None,
-        [msg]
-        queue=False  # No queue for simple UI update
     )
 
-    model_search_box.change(fn=filter_models_choices, inputs=model_search_box, outputs=featured_model_radio)
-    # Let radio button directly be the selected_model, custom_model_box is an override
-    # featured_model_radio.change(fn=update_custom_model_from_radio, inputs=featured_model_radio, outputs=custom_model_box)
 
-    provider_radio.change(fn=validate_provider_choice, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
 
 print("Gradio interface initialized.")
 
 if __name__ == "__main__":
     print("Launching the demo application.")
-    demo.

 import base64
 from PIL import Image
 import io
 
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
 print("Access token loaded.")
 
         print(f"Error encoding image: {e}")
         return None
 
 def respond(
     message,
+    image_files,  # Changed parameter name and structure
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
 
     custom_api_key,
     custom_model,
     model_search_term,
+    selected_model
 ):
     print(f"Received message: {message}")
     print(f"Received {len(image_files) if image_files else 0} images")
+    print(f"History: {history}")
     print(f"System message: {system_message}")
     print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
     print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
 
     print(f"Selected model (custom_model): {custom_model}")
     print(f"Model search term: {model_search_term}")
     print(f"Selected model from radio: {selected_model}")
 
+    # Determine which token to use
     token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
 
     if custom_api_key.strip() != "":
 
     else:
         print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
 
+    # Initialize the Inference Client with the provider and appropriate token
+    client = InferenceClient(token=token_to_use, provider=provider)
     print(f"Hugging Face Inference Client initialized with {provider} provider.")
 
+    # Convert seed to None if -1 (meaning random)
     if seed == -1:
         seed = None
 
+    # Create multimodal content if images are present
     if image_files and len(image_files) > 0:
+        # Process the user message to include images
+        user_content = []
+
+        # Add text part if there is any
+        if message and message.strip():
+            user_content.append({
+                "type": "text",
+                "text": message
+            })
+
+        # Add image parts
+        for img in image_files:
+            if img is not None:
+                # Get raw image data from path
                 try:
+                    encoded_image = encode_image(img)
                     if encoded_image:
                         user_content.append({
                             "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/jpeg;base64,{encoded_image}"
+                            }
                         })
                 except Exception as e:
+                    print(f"Error encoding image: {e}")
+    else:
+        # Text-only message
+        user_content = message
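For a turn carrying one text snippet and one attached image, the user_content built above takes roughly the following shape; this is a sketch only, with an illustrative question and an abbreviated base64 payload. It is then appended as the content of the final user message.

    user_content = [
        {"type": "text", "text": "What is in this photo?"},
        {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,/9j/4AAQ..."}},
    ]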
 
+    # Prepare messages in the format expected by the API
+    messages = [{"role": "system", "content": system_message}]
     print("Initial messages array constructed.")
 
+    # Add conversation history to the context
+    for val in history:
+        user_part = val[0]
+        assistant_part = val[1]
+        if user_part:
+            # Handle both text-only and multimodal messages in history
+            if isinstance(user_part, tuple) and len(user_part) == 2:
+                # This is a multimodal message with text and images
+                history_content = []
+                if user_part[0]:  # Text
+                    history_content.append({
+                        "type": "text",
+                        "text": user_part[0]
+                    })
+
+                for img in user_part[1]:  # Images
+                    if img:
+                        try:
+                            encoded_img = encode_image(img)
+                            if encoded_img:
+                                history_content.append({
+                                    "type": "image_url",
+                                    "image_url": {
+                                        "url": f"data:image/jpeg;base64,{encoded_img}"
+                                    }
+                                })
+                        except Exception as e:
+                            print(f"Error encoding history image: {e}")
+
+                messages.append({"role": "user", "content": history_content})
             else:
+                # Regular text message
+                messages.append({"role": "user", "content": user_part})
+            print(f"Added user message to context (type: {type(user_part)})")
+
+        if assistant_part:
+            messages.append({"role": "assistant", "content": assistant_part})
+            print(f"Added assistant message to context: {assistant_part}")
 
+    # Append the latest user message
+    messages.append({"role": "user", "content": user_content})
     print(f"Latest user message appended (content type: {type(user_content)})")
 
+    # Determine which model to use, prioritizing custom_model if provided
+    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
+    print(f"Model selected for inference: {model_to_use}")
+
+    # Start with an empty string to build the response as tokens stream in
+    response = ""
+    print(f"Sending request to {provider} provider.")
 
+    # Prepare parameters for the chat completion request
     parameters = {
         "max_tokens": max_tokens,
         "temperature": temperature,
 
     if seed is not None:
         parameters["seed"] = seed
 
+    # Use the InferenceClient for making the request
     try:
+        # Create a generator for the streaming response
+        stream = client.chat_completion(
             model=model_to_use,
+            messages=messages,
             stream=True,
             **parameters
         )
 
+        print("Received tokens: ", end="", flush=True)
 
+        # Process the streaming response
         for chunk in stream:
             if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
+                # Extract the content from the response
                 if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                     token_text = chunk.choices[0].delta.content
                     if token_text:
                         print(token_text, end="", flush=True)
+                        response += token_text
+                        yield response
+
+        print()
     except Exception as e:
+        print(f"Error during inference: {e}")
+        response += f"\nError: {str(e)}"
+        yield response
+
+    print("Completed response generation.")
 
+# Function to validate provider selection based on BYOK
+def validate_provider(api_key, provider):
+    if not api_key.strip() and provider != "hf-inference":
+        return gr.update(value="hf-inference")
+    return gr.update(value=provider)
 
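A quick illustration of the gate implemented by validate_provider: with no user-supplied key the selection falls back to "hf-inference", otherwise the chosen provider is kept. The key value below is illustrative only.

    validate_provider("", "cerebras")           # -> gr.update(value="hf-inference")
    validate_provider("hf_abc123", "cerebras")  # -> gr.update(value="cerebras")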
 # GRADIO UI
 with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
+    # Create the chatbot component
     chatbot = gr.Chatbot(
         height=600,
         show_copy_button=True,
+        placeholder="Select a model and begin chatting. Now supports multiple inference providers and multimodal inputs",
+        layout="panel"
     )
     print("Chatbot interface created.")
 
+    # Multimodal textbox for messages (combines text and file uploads)
+    msg = gr.MultimodalTextbox(
+        placeholder="Type a message or upload images...",
+        show_label=False,
+        container=False,
+        scale=12,
+        file_types=["image"],
+        file_count="multiple",
+        sources=["upload"]
+    )
 
+    # Note: We're removing the separate submit button since MultimodalTextbox has its own
+
+    # Create accordion for settings
     with gr.Accordion("Settings", open=False):
+        # System message
         system_message_box = gr.Textbox(
             value="You are a helpful AI assistant that can understand images and text.",
             placeholder="You are a helpful assistant.",
             label="System Prompt"
         )
 
+        # Generation parameters
         with gr.Row():
             with gr.Column():
+                max_tokens_slider = gr.Slider(
+                    minimum=1,
+                    maximum=4096,
+                    value=512,
+                    step=1,
+                    label="Max tokens"
+                )
+
+                temperature_slider = gr.Slider(
+                    minimum=0.1,
+                    maximum=4.0,
+                    value=0.7,
+                    step=0.1,
+                    label="Temperature"
+                )
+
+                top_p_slider = gr.Slider(
+                    minimum=0.1,
+                    maximum=1.0,
+                    value=0.95,
+                    step=0.05,
+                    label="Top-P"
+                )
+
             with gr.Column():
+                frequency_penalty_slider = gr.Slider(
+                    minimum=-2.0,
+                    maximum=2.0,
+                    value=0.0,
+                    step=0.1,
+                    label="Frequency Penalty"
+                )
+
+                seed_slider = gr.Slider(
+                    minimum=-1,
+                    maximum=65535,
+                    value=-1,
+                    step=1,
+                    label="Seed (-1 for random)"
+                )
+
+        # Provider selection
+        providers_list = [
+            "hf-inference",  # Default Hugging Face Inference
+            "cerebras",      # Cerebras provider
+            "together",      # Together AI
+            "sambanova",     # SambaNova
+            "novita",        # Novita AI
+            "cohere",        # Cohere
+            "fireworks-ai",  # Fireworks AI
+            "hyperbolic",    # Hyperbolic
+            "nebius",        # Nebius
+        ]
+
+        provider_radio = gr.Radio(
+            choices=providers_list,
+            value="hf-inference",
+            label="Inference Provider",
+        )
+
+        # New BYOK textbox
+        byok_textbox = gr.Textbox(
+            value="",
+            label="BYOK (Bring Your Own Key)",
+            info="Enter a custom Hugging Face API key here. When empty, only 'hf-inference' provider can be used.",
+            placeholder="Enter your Hugging Face API token",
+            type="password"  # Hide the API key for security
+        )
+
+        # Custom model box
+        custom_model_box = gr.Textbox(
+            value="",
+            label="Custom Model",
+            info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
+            placeholder="meta-llama/Llama-3.3-70B-Instruct"
+        )
 
+        # Model search
+        model_search_box = gr.Textbox(
+            label="Filter Models",
+            placeholder="Search for a featured model...",
+            lines=1
+        )
 
+        # Featured models list
+        # Updated to include multimodal models
         models_list = [
+            "meta-llama/Llama-3.2-11B-Vision-Instruct",
+            "meta-llama/Llama-3.3-70B-Instruct",
+            "meta-llama/Llama-3.1-70B-Instruct",
+            "meta-llama/Llama-3.0-70B-Instruct",
+            "meta-llama/Llama-3.2-3B-Instruct",
+            "meta-llama/Llama-3.2-1B-Instruct",
+            "meta-llama/Llama-3.1-8B-Instruct",
+            "NousResearch/Hermes-3-Llama-3.1-8B",
+            "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+            "mistralai/Mistral-Nemo-Instruct-2407",
+            "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "mistralai/Mistral-7B-Instruct-v0.3",
+            "mistralai/Mistral-7B-Instruct-v0.2",
+            "Qwen/Qwen3-235B-A22B",
+            "Qwen/Qwen3-32B",
+            "Qwen/Qwen2.5-72B-Instruct",
+            "Qwen/Qwen2.5-3B-Instruct",
+            "Qwen/Qwen2.5-0.5B-Instruct",
+            "Qwen/QwQ-32B",
+            "Qwen/Qwen2.5-Coder-32B-Instruct",
+            "microsoft/Phi-3.5-mini-instruct",
+            "microsoft/Phi-3-mini-128k-instruct",
+            "microsoft/Phi-3-mini-4k-instruct",
+        ]
+
+        featured_model_radio = gr.Radio(
+            label="Select a model below",
+            choices=models_list,
+            value="meta-llama/Llama-3.2-11B-Vision-Instruct",  # Default to a multimodal model
+            interactive=True
+        )
+
+        gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")
 
+    # Chat history state
+    chat_history = gr.State([])
 
+    # Function to filter models
+    def filter_models(search_term):
         print(f"Filtering models with search term: {search_term}")
         filtered = [m for m in models_list if search_term.lower() in m.lower()]
         print(f"Filtered models: {filtered}")
+        return gr.update(choices=filtered)
 
+    # Function to set custom model from radio
+    def set_custom_model_from_radio(selected):
+        print(f"Featured model selected: {selected}")
+        return selected
 
+    # Function for the chat interface
+    def user(user_message, history):
+        # Debug logging for troubleshooting
+        print(f"User message received: {user_message}")
 
+        # Skip if message is empty (no text and no files)
+        if not user_message or (not user_message.get("text") and not user_message.get("files")):
+            print("Empty message, skipping")
+            return history
 
+        # Prepare multimodal message format
+        text_content = user_message.get("text", "").strip()
+        files = user_message.get("files", [])
 
+        print(f"Text content: {text_content}")
+        print(f"Files: {files}")
+
+        # If both text and files are empty, skip
+        if not text_content and not files:
+            print("No content to display")
+            return history
+
+        # Add message with images to history
+        if files and len(files) > 0:
+            # Add text message first if it exists
+            if text_content:
+                # Add a separate text message
+                print(f"Adding text message: {text_content}")
+                history.append([text_content, None])
 
+            # Then add each image file separately
+            for file_path in files:
+                if file_path and isinstance(file_path, str):
+                    print(f"Adding image: {file_path}")
+                    # Add image as a separate message with no text
+                    history.append([f"![Image]({file_path})", None])
 
+            return history
+        else:
+            # For text-only messages
+            print(f"Adding text-only message: {text_content}")
+            history.append([text_content, None])
+            return history
 
+    # Define bot response function
+    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
+        # Check if history is valid
+        if not history or len(history) == 0:
+            print("No history to process")
+            return history
+
+        # Get the most recent message and detect if it's an image
+        user_message = history[-1][0]
+        print(f"Processing user message: {user_message}")
+
+        is_image = False
+        image_path = None
+        text_content = user_message
+
+        # Check if this is an image message (marked with ![Image])
+        if isinstance(user_message, str) and user_message.startswith("![Image]("):
+            is_image = True
+            # Extract image path from markdown format ![Image](path)
+            image_path = user_message.replace("![Image](", "").replace(")", "")
+            print(f"Image detected: {image_path}")
+            text_content = ""  # No text for image-only messages
+
+        # Look back for text context if this is an image
+        text_context = ""
+        if is_image and len(history) > 1:
+            # Use the previous message as context if it's text
+            prev_message = history[-2][0]
+            if isinstance(prev_message, str) and not prev_message.startswith("![Image]("):
+                text_context = prev_message
+                print(f"Using text context from previous message: {text_context}")
+
+        # Process message through respond function
+        history[-1][1] = ""
+
+        # Use either the image or text for the API
+        if is_image:
+            # For image messages
+            for response in respond(
+                text_context,  # Text context from previous message if any
+                [image_path],  # Current image
+                history[:-1],  # Previous history
+                system_msg,
+                max_tokens,
+                temperature,
+                top_p,
+                freq_penalty,
+                seed,
+                provider,
+                api_key,
+                custom_model,
+                search_term,
+                selected_model
+            ):
+                history[-1][1] = response
+                yield history
+        else:
+            # For text-only messages
+            for response in respond(
+                text_content,  # Text message
+                None,  # No image
+                history[:-1],  # Previous history
+                system_msg,
+                max_tokens,
+                temperature,
+                top_p,
+                freq_penalty,
+                seed,
+                provider,
+                api_key,
+                custom_model,
+                search_term,
+                selected_model
+            ):
+                history[-1][1] = response
+                yield history
 
+    # Event handlers - only using the MultimodalTextbox's built-in submit functionality
     msg.submit(
+        user,
+        [msg, chatbot],
+        [chatbot],
+        queue=False
     ).then(
+        bot,
+        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
+         model_search_box, featured_model_radio],
+        [chatbot]
     ).then(
+        lambda: {"text": "", "files": []},  # Clear inputs after submission
         None,
+        [msg]
     )
 
+    # Connect the model filter to update the radio choices
+    model_search_box.change(
+        fn=filter_models,
+        inputs=model_search_box,
+        outputs=featured_model_radio
     )
+    print("Model search box change event linked.")
 
+    # Connect the featured model radio to update the custom model box
+    featured_model_radio.change(
+        fn=set_custom_model_from_radio,
+        inputs=featured_model_radio,
+        outputs=custom_model_box
+    )
+    print("Featured model radio button change event linked.")
+
+    # Connect the BYOK textbox to validate provider selection
+    byok_textbox.change(
+        fn=validate_provider,
+        inputs=[byok_textbox, provider_radio],
+        outputs=provider_radio
+    )
+    print("BYOK textbox change event linked.")
 
+    # Also validate provider when the radio changes to ensure consistency
+    provider_radio.change(
+        fn=validate_provider,
+        inputs=[byok_textbox, provider_radio],
+        outputs=provider_radio
+    )
+    print("Provider radio button change event linked.")
 
 print("Gradio interface initialized.")
 
 if __name__ == "__main__":
     print("Launching the demo application.")
+    demo.launch(show_api=True)
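The user and bot handlers added above share an informal convention: each uploaded image is stored in the visible chat history as a Markdown image string, which bot then unpacks into a file path before calling respond. A minimal sketch of that round trip, assuming the standard Markdown image form ![Image](path) implied by the extraction logic; the path below is illustrative.

    file_path = "/tmp/gradio/cat.jpg"                               # illustrative upload path
    marker = f"![Image]({file_path})"                               # what user() appends to history
    image_path = marker.replace("![Image](", "").replace(")", "")   # what bot() recovers
    assert image_path == file_path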