Nymbo committed on
Commit e8ea55a · verified · 1 Parent(s): 0d8a414

Update app.py

Files changed (1)
  1. app.py +424 -372
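For orientation, below is a minimal sketch (not part of the commit) of the streaming, multimodal chat_completion call that the updated app.py assembles; the model ID, provider, prompt, and image path are illustrative assumptions, not values taken from the diff.

import base64
import os
from huggingface_hub import InferenceClient

# Illustrative sketch only: model, provider, prompt, and image path are placeholders.
client = InferenceClient(token=os.getenv("HF_TOKEN"), provider="hf-inference")

with open("example.jpg", "rb") as f:  # hypothetical local image
    encoded = base64.b64encode(f.read()).decode("utf-8")

messages = [
    {"role": "system", "content": "You are a helpful AI assistant that can understand images and text."},
    {"role": "user", "content": [
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded}"}},
    ]},
]

# Stream tokens back the same way respond() does in app.py.
for chunk in client.chat_completion(
    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
    messages=messages,
    stream=True,
    max_tokens=512,
):
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)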
app.py CHANGED
@@ -1,44 +1,39 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
- import json # Added for debug printing payloads
5
  import base64
6
  from PIL import Image
7
  import io
8
 
9
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
10
- print(f"Access token from HF_TOKEN env var loaded. Is it None? {ACCESS_TOKEN is None}. Length if not None: {len(ACCESS_TOKEN) if ACCESS_TOKEN else 'N/A'}")
11
 
12
  # Function to encode image to base64
13
- def encode_image(image_path_or_pil):
14
- if not image_path_or_pil:
15
- print("No image path or PIL Image provided to encode_image")
16
  return None
17
 
18
  try:
19
- # print(f"Encoding image. Input type: {type(image_path_or_pil)}") # Debug
20
 
21
- if isinstance(image_path_or_pil, Image.Image):
22
- image = image_path_or_pil
23
- # print("Input is already a PIL Image.")
24
- elif isinstance(image_path_or_pil, str):
25
- # print(f"Input is a path string: {image_path_or_pil}")
26
- if not os.path.exists(image_path_or_pil):
27
- print(f"Error: Image path does not exist: {image_path_or_pil}")
28
- return None
29
- image = Image.open(image_path_or_pil)
30
  else:
31
- print(f"Error: Unsupported type for encode_image: {type(image_path_or_pil)}")
32
- return None
33
 
 
34
  if image.mode == 'RGBA':
35
- # print("Converting RGBA image to RGB.")
36
  image = image.convert('RGB')
37
 
 
38
  buffered = io.BytesIO()
39
  image.save(buffered, format="JPEG")
40
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
41
- # print("Image encoded successfully to base64.")
42
  return img_str
43
  except Exception as e:
44
  print(f"Error encoding image: {e}")
@@ -46,7 +41,7 @@ def encode_image(image_path_or_pil):
46
 
47
  def respond(
48
  message,
49
- image_files,
50
  history: list[tuple[str, str]],
51
  system_message,
52
  max_tokens,
@@ -55,144 +50,139 @@ def respond(
55
  frequency_penalty,
56
  seed,
57
  provider,
58
- custom_api_key, # This is the value from byok_textbox
59
  custom_model,
60
  model_search_term,
61
  selected_model
62
  ):
63
- print(f"--- New Respond Call ---")
64
- print(f"Received message: '{message}'")
65
- print(f"Received {len(image_files) if image_files else 0} image files.")
66
- # print(f"History length: {len(history)}") # History can be verbose
67
- print(f"System message: '{system_message}'")
68
- print(f"Generation Params: MaxTokens={max_tokens}, Temp={temperature}, TopP={top_p}, FreqPenalty={frequency_penalty}, Seed={seed}")
69
- print(f"Selected provider: '{provider}'")
70
 
71
- # Explicitly show the raw custom_api_key received
72
- raw_key_type = type(custom_api_key)
73
- raw_key_len = len(custom_api_key) if isinstance(custom_api_key, str) else 'N/A (not a string)'
74
- print(f"Raw custom_api_key from UI: type={raw_key_type}, length={raw_key_len}")
75
- if isinstance(custom_api_key, str) and len(custom_api_key) > 0:
76
- print(f"Raw custom_api_key (masked): '{custom_api_key[:4]}...{custom_api_key[-4:]}'" if len(custom_api_key) > 8 else custom_api_key)
77
-
78
-
79
- token_to_use = None
80
- effective_custom_key = ""
81
-
82
- if custom_api_key and isinstance(custom_api_key, str): # Ensure it's a string and not None
83
- effective_custom_key = custom_api_key.strip()
84
-
85
- if effective_custom_key: # True if string is not empty after stripping
86
- token_to_use = effective_custom_key
87
- print(f"TOKEN SELECTION: USING CUSTOM API KEY (BYOK). Length: {len(token_to_use)}")
88
- if ACCESS_TOKEN and token_to_use == ACCESS_TOKEN:
89
- print("INFO: Custom key is identical to the environment HF_TOKEN.")
90
  else:
91
- token_to_use = ACCESS_TOKEN # This will be None if HF_TOKEN is not set or empty
92
- if token_to_use:
93
- print(f"TOKEN SELECTION: USING DEFAULT API KEY (HF_TOKEN from env). Length: {len(token_to_use)}")
94
- else:
95
- print("TOKEN SELECTION: DEFAULT API KEY (HF_TOKEN from env) IS NOT SET or EMPTY. Custom key was also empty.")
96
-
97
- if not token_to_use:
98
- print("CRITICAL WARNING: No API token determined (neither custom nor default was usable/provided). Inference will likely fail or use public access if supported by model/provider.")
99
- # InferenceClient will handle token=None by trying its own env var lookup or failing.
100
- else:
101
- # For debugging, print a masked version of the token being finally used
102
- if isinstance(token_to_use, str) and len(token_to_use) > 8:
103
- print(f"FINAL TOKEN for InferenceClient: '{token_to_use[:4]}...{token_to_use[-4:]}' (masked)")
104
- elif isinstance(token_to_use, str):
105
- print(f"FINAL TOKEN for InferenceClient: '{token_to_use}' (short token)")
106
- else: # Should not happen if logic above is correct and token_to_use is string or None
107
- print(f"FINAL TOKEN for InferenceClient: {token_to_use} (not a string or None, unusual!)")
108
 
109
  # Initialize the Inference Client with the provider and appropriate token
110
  client = InferenceClient(token=token_to_use, provider=provider)
111
- print(f"Hugging Face Inference Client initialized with provider: '{provider}'.")
112
 
113
- if seed == -1: # Convert seed to None if -1 (meaning random)
 
114
  seed = None
115
 
116
- # Prepare user_content (current message with text and/or images)
117
- user_content_parts = []
118
- if message and message.strip():
119
- user_content_parts.append({"type": "text", "text": message})
120
-
121
  if image_files and len(image_files) > 0:
122
- for img_file_path in image_files:
123
- if img_file_path: # img_file_path is a string path from Gradio MultimodalTextbox
124
- encoded_image = encode_image(img_file_path)
125
- if encoded_image:
126
- user_content_parts.append({
127
- "type": "image_url",
128
- "image_url": {"url": f"data:image/jpeg;base64,{encoded_image}"}
129
- })
130
- else:
131
- print(f"Warning: Failed to encode image for current message: {img_file_path}")
132
-
133
- # Determine final user_content structure
134
- if not user_content_parts: # No text and no images
135
- print("Warning: Current user message is empty (no text, no images).")
136
- # Depending on API, might need to send empty string or handle this case.
137
- # For now, let it proceed; API might error or interpret as empty prompt.
138
- final_user_content = ""
139
- elif len(user_content_parts) == 1 and user_content_parts[0]["type"] == "text":
140
- final_user_content = user_content_parts[0]["text"] # Text-only, pass as string
141
  else:
142
- final_user_content = user_content_parts # Multimodal, pass as list of dicts
 
143
 
144
- # Prepare messages list for the API
145
  messages = [{"role": "system", "content": system_message}]
146
-
147
- for hist_user_content, hist_assistant_content in history:
148
- # hist_user_content can be string (text) or tuple (text, [image_paths])
149
- if hist_user_content:
150
- if isinstance(hist_user_content, tuple) and len(hist_user_content) == 2:
151
- # Multimodal history entry: (text, [list_of_image_paths])
152
- hist_text, hist_image_paths = hist_user_content
153
- current_hist_user_parts = []
154
- if hist_text and hist_text.strip():
155
- current_hist_user_parts.append({"type": "text", "text": hist_text})
156
- if hist_image_paths:
157
- for hist_img_path in hist_image_paths:
158
- encoded_hist_img = encode_image(hist_img_path)
159
- if encoded_hist_img:
160
- current_hist_user_parts.append({
161
- "type": "image_url",
162
- "image_url": {"url": f"data:image/jpeg;base64,{encoded_hist_img}"}
163
- })
164
- else:
165
- print(f"Warning: Failed to encode history image: {hist_img_path}")
166
- if current_hist_user_parts: # Only add if there's content
167
- messages.append({"role": "user", "content": current_hist_user_parts})
168
-
169
- elif isinstance(hist_user_content, str): # Text-only history entry
170
- messages.append({"role": "user", "content": hist_user_content})
171
  else:
172
- print(f"Warning: Unexpected type for history user content: {type(hist_user_content)}")
173
-
174
- if hist_assistant_content:
175
- messages.append({"role": "assistant", "content": hist_assistant_content})
176
 
177
- messages.append({"role": "user", "content": final_user_content})
178
- # print(f"Final messages object for API: {json.dumps(messages, indent=2)}") # Very verbose, use for deep debugging
 
179
 
 
180
  model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
181
- print(f"Model selected for inference: '{model_to_use}'")
182
 
183
- response_text = ""
184
- print(f"Sending request to provider '{provider}' for model '{model_to_use}'. Streaming enabled.")
 
185
 
 
186
  parameters = {
187
  "max_tokens": max_tokens,
188
  "temperature": temperature,
189
  "top_p": top_p,
190
  "frequency_penalty": frequency_penalty,
191
  }
 
192
  if seed is not None:
193
  parameters["seed"] = seed
194
 
 
195
  try:
 
196
  stream = client.chat_completion(
197
  model=model_to_use,
198
  messages=messages,
@@ -200,316 +190,378 @@ def respond(
200
  **parameters
201
  )
202
 
203
- # print("Streaming response tokens: ", end="", flush=True) # Can be noisy
 
 
204
  for chunk in stream:
205
  if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
206
- delta = chunk.choices[0].delta
207
- if delta and hasattr(delta, 'content') and delta.content:
208
- token_text = delta.content
209
- # print(token_text, end="", flush=True) # Handled by yield
210
- response_text += token_text
211
- yield response_text
212
- # print("\nStream ended.")
 
 
213
  except Exception as e:
214
- error_message = f"{type(e).__name__}: {str(e)}"
215
- print(f"ERROR DURING INFERENCE: {error_message}")
216
- # If it's a client error (4xx), the request body might be relevant
217
- if hasattr(e, 'response') and e.response is not None:
218
- print(f"Error details: Status {e.response.status_code}. Response text: {e.response.text}")
219
- if 400 <= e.response.status_code < 500:
220
- try:
221
- print(f"Offending request messages payload (first 1000 chars): {json.dumps(messages, indent=2)[:1000]}")
222
- except Exception as E:
223
- print(f"Could not dump messages payload: {E}")
224
-
225
- response_text += f"\nAn error occurred: {error_message}"
226
- yield response_text
227
-
228
- print("Completed response generation for current call.")
229
 
 
230
 
231
  # Function to validate provider selection based on BYOK
232
- def validate_provider(api_key, provider_choice): # Renamed provider to provider_choice
233
- # This function's purpose was to force hf-inference if no BYOK for other providers.
234
- # However, InferenceClient handles provider-specific keys or HF token routing.
235
- # For now, let's assume any key might work with any provider and let InferenceClient handle it.
236
- # If a custom key is entered, it *could* be for any provider.
237
- # If no custom key, and ACCESS_TOKEN is used, it's an HF_TOKEN, best for hf-inference or HF-managed providers.
238
- # The current logic doesn't strictly need this validation if we trust InferenceClient.
239
- # Keeping it simple:
240
- # if not api_key.strip() and provider_choice != "hf-inference":
241
- # print(f"No BYOK, but provider '{provider_choice}' selected. Forcing 'hf-inference'.")
242
- # return gr.update(value="hf-inference")
243
- return gr.update(value=provider_choice) # No change for now, allow user selection.
244
 
245
  # GRADIO UI
246
  with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
247
  chatbot = gr.Chatbot(
248
  height=600,
249
  show_copy_button=True,
250
- placeholder="Select a model, enter your message, and upload images if needed.",
251
- layout="panel",
252
- avatar_images=(None, "https://huggingface.co/chat/huggingchat/logo.svg") # Example bot avatar
253
  )
 
254
 
 
255
  msg = gr.MultimodalTextbox(
256
  placeholder="Type a message or upload images...",
257
  show_label=False,
258
  container=False,
259
- scale=12, # Ensure this is within a gr.Row() or similar if scale is used effectively
260
  file_types=["image"],
261
- file_count="multiple", # Allows multiple image uploads
262
- sources=["upload"] # Can add "clipboard"
263
  )
264
 
265
  with gr.Accordion("Settings", open=False):
 
266
  system_message_box = gr.Textbox(
267
  value="You are a helpful AI assistant that can understand images and text.",
268
  placeholder="You are a helpful assistant.",
269
  label="System Prompt"
270
  )
271
 
 
272
  with gr.Row():
273
  with gr.Column():
274
- max_tokens_slider = gr.Slider(1, 4096, value=512, step=1, label="Max new tokens")
275
- temperature_slider = gr.Slider(0.1, 2.0, value=0.7, step=0.05, label="Temperature") # Range adjusted
276
- top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P")
277
  with gr.Column():
278
- frequency_penalty_slider = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty")
279
- seed_slider = gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)")
280
 
281
- providers_list = ["hf-inference", "cerebras", "together", "sambanova", "novita", "cohere", "fireworks-ai", "hyperbolic", "nebius"]
282
- provider_radio = gr.Radio(choices=providers_list, value="hf-inference", label="Inference Provider")
283
 
 
284
  byok_textbox = gr.Textbox(
285
- value="", label="BYOK (Bring Your Own Key)",
286
- info="Enter your API key. For 'hf-inference', use an HF token. For other providers, use their specific key or an HF token if supported.",
287
- placeholder="Enter your API token here", type="password"
 
 
288
  )
289
 
 
290
  custom_model_box = gr.Textbox(
291
- value="", label="Custom Model ID / Endpoint",
292
- info="(Optional) Provide a custom model ID (e.g., 'meta-llama/Llama-3-70b-chat-hf') or full endpoint URL. Overrides featured model selection.",
293
- placeholder="org/model-name or full URL"
 
294
  )
295
 
296
- model_search_box = gr.Textbox(label="Filter Featured Models", placeholder="Search...", lines=1)
297
 
 
 
298
  models_list = [
299
- "meta-llama/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.3-70B-Instruct",
300
- "meta-llama/Llama-3.1-70B-Instruct", "meta-llama/Llama-3.0-70B-Instruct",
301
- "meta-llama/Llama-3.2-3B-Instruct", "meta-llama/Llama-3.2-1B-Instruct",
302
- "meta-llama/Llama-3.1-8B-Instruct", "NousResearch/Hermes-3-Llama-3.1-8B",
303
- "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "mistralai/Mistral-Nemo-Instruct-2407",
304
- "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.3",
305
- "mistralai/Mistral-7B-Instruct-v0.2", "Qwen/Qwen3-235B-A22B", "Qwen/Qwen3-32B",
306
- "Qwen/Qwen2.5-72B-Instruct", "Qwen/Qwen2.5-3B-Instruct", "Qwen/Qwen2.5-0.5B-Instruct",
307
- "Qwen/QwQ-32B", "Qwen/Qwen2.5-Coder-32B-Instruct", "microsoft/Phi-3.5-mini-instruct",
308
- "microsoft/Phi-3-mini-128k-instruct", "microsoft/Phi-3-mini-4k-instruct",
309
  ]
 
310
  featured_model_radio = gr.Radio(
311
- label="Select a Featured Model", choices=models_list,
312
- value="meta-llama/Llama-3.2-11B-Vision-Instruct", interactive=True
 
 
313
  )
314
- gr.Markdown("[All Text-to-Text Models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [All Multimodal Models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")
315
-
316
- # Chat history state (remains gr.State for proper handling by Gradio)
317
- # The `chatbot` component itself manages its display state.
318
- # We need a separate state if we want to manipulate the history structure before passing to API.
319
- # The current `bot` function takes `chatbot` (which is history) directly.
320
-
321
- # Revised user function for MultimodalTextbox
322
- # It appends the user's input (text and/or files) to the chatbot history.
323
- # The `bot` function will then process this history.
324
- def handle_user_input(multimodal_input, chat_history_list):
325
- text_input = multimodal_input.get("text", "").strip()
326
- file_inputs = multimodal_input.get("files", []) # List of file paths
327
-
328
- # print(f"User input: Text='{text_input}', Files={file_inputs}")
329
-
330
- if not text_input and not file_inputs:
331
- # print("User input empty, not adding to history.")
332
- return chat_history_list # No change if input is empty
333
 
334
- # For multimodal display in chatbot, we can represent images using Markdown.
335
- # The actual file paths will be used by `respond` for API calls.
336
- # We need to decide how to store this in history for `respond`
337
- # Option 1: Store (text, [paths]) tuple for user turns.
338
- # Option 2: Create separate entries for text and images.
339
 
340
- # Let's use Option 1 for structured history, easier for `respond`
341
- # The `chatbot` component can display a text representation.
 
 
342
 
343
- display_entry_user = ""
344
- if text_input:
345
- display_entry_user += text_input
346
 
347
- # For display in chatbot, we can use Markdown for images.
348
- # For passing to `respond` via history, we need the actual paths.
349
- # The `bot` function will unpack this.
350
 
351
- # For `chatbot` display:
352
- # If there are images, we can create a text representation.
353
- # For example, just list "<image1> <image2>" or use Markdown if supported for local files.
354
- # Gradio Chatbot displays images if the path is a local temp file path.
355
 
356
- user_turn_content_for_api = (text_input, [f.name for f in file_inputs if f] if file_inputs else [])
357
-
358
- # For chatbot display:
359
- # Gradio's Chatbot can display images directly if you pass a list like:
360
- # [[(image_path1,), (image_path2,)], None] for an image-only user message
361
- # Or [[text_input, (image_path1,)], None]
362
- # Let's try to prepare for this.
363
-
364
- if file_inputs:
365
- # If there's text AND files, Gradio expects text first, then tuples for files.
366
- # e.g., history.append( [ [text_input] + [(file.name,) for file in file_inputs], None] )
367
- # Or, more simply, if Chatbot handles multimodal input display well:
368
- chatbot_user_message = []
369
- if text_input:
370
- chatbot_user_message.append(text_input)
371
- for file_obj in file_inputs:
372
- if file_obj and hasattr(file_obj, 'name'): # file_obj is a TemporaryFileWrapper
373
- chatbot_user_message.append((file_obj.name,)) # Tuple for image path
374
 
375
- chat_history_list.append([chatbot_user_message, None])
376
-
377
- elif text_input: # Text only
378
- chat_history_list.append([text_input, None])
379
 
380
- # The `bot` function will receive `chat_history_list`.
381
- # It needs to reconstruct text and image paths from `chat_history_list[-1][0]`
382
- # to pass to `respond`'s `message` and `image_files` parameters.
383
-
384
- return chat_history_list
385
-
386
-
387
- # Revised bot function to handle history from handle_user_input
388
- def process_bot_response(
389
- current_chat_history, # This is the full history from the chatbot
390
- system_msg, max_tkns, temp, tp_p, freq_pen, sd, prov, api_k, cust_model, srch_term, sel_model
391
- ):
392
- if not current_chat_history or not current_chat_history[-1][0]:
393
- print("Bot: History is empty or last user message is empty.")
394
- return current_chat_history # Or yield current_chat_history
395
-
396
- last_user_turn_content = current_chat_history[-1][0] # This is what handle_user_input created
397
 
398
- # Extract text and image paths from last_user_turn_content
399
- current_message_text = ""
400
- current_image_paths = []
401
-
402
- if isinstance(last_user_turn_content, str): # Text-only
403
- current_message_text = last_user_turn_content
404
- elif isinstance(last_user_turn_content, list): # Potentially multimodal from handle_user_input
405
- for item in last_user_turn_content:
406
- if isinstance(item, str):
407
- current_message_text = item # Assumes one text part
408
- elif isinstance(item, tuple) and len(item) > 0 and isinstance(item[0], str):
409
- current_image_paths.append(item[0]) # item[0] is the image path
410
 
411
- # print(f"Bot: Extracted for respond - Text='{current_message_text}', Images={current_image_paths}")
412
-
413
- # History for `respond` should be all turns *except* the current one.
414
- history_for_api = []
415
- for user_content, assistant_content in current_chat_history[:-1]:
416
- # Reconstruct (text, [paths]) structure for history items if they were multimodal
417
- # This part needs careful handling if history itself contains multimodal user turns
418
- # For simplicity, assuming history user_content is string or already (text, [paths])
419
- # The current `handle_user_input` makes `user_content` a list for multimodal.
420
- # This needs to be harmonized.
421
-
422
- # Let's simplify: `respond` will parse history. We just pass it.
423
- # The `respond` function's history processing needs to handle the new format.
424
- # The `respond` function expects history items to be:
425
- # user_part: str OR (text_str, [img_paths_list])
426
- # assistant_part: str
427
-
428
- # Let's re-structure history_for_api based on how `handle_user_input` formats it.
429
- # `handle_user_input` stores `chatbot_user_message` which is `[text, (path1,), (path2,)]` or `text`
430
- # `respond` needs to be adapted for this history format if we pass it directly.
431
-
432
- # For now, let's adapt the history passed to `respond` to its expected format.
433
- api_hist_user_entry = None
434
- if isinstance(user_content, str): # Simple text history
435
- api_hist_user_entry = user_content
436
- elif isinstance(user_content, list): # Multimodal history from `handle_user_input`
437
- hist_text = ""
438
- hist_paths = []
439
- for item in user_content:
440
- if isinstance(item, str): hist_text = item
441
- elif isinstance(item, tuple): hist_paths.append(item[0])
442
- api_hist_user_entry = (hist_text, hist_paths)
443
-
444
- history_for_api.append( (api_hist_user_entry, assistant_content) )
445
-
446
-
447
- # Call respond with the current message parts and the processed history
448
- # The `respond` function's first two args are `message` (text) and `image_files` (list of paths)
449
- # for the *current* turn.
450
 
451
- # Clear the placeholder for bot's response in the last history item
452
- current_chat_history[-1][1] = ""
453
 
454
- stream = respond(
455
- current_message_text,
456
- current_image_paths,
457
- history_for_api, # Pass the history *before* the current turn
458
- system_msg, max_tkns, temp, tp_p, freq_pen, sd, prov, api_k, cust_model, srch_term, sel_model
459
- )
460
 
461
- for partial_response in stream:
462
- current_chat_history[-1][1] = partial_response
463
- yield current_chat_history
464
-
465
-
466
- # Event handlers
467
- # 1. User submits message (text and/or files)
468
- # 2. `handle_user_input` updates chatbot history with user's message.
469
- # 3. `process_bot_response` takes this new history, calls API, and streams response back to chatbot.
470
-
471
- submit_event = msg.submit(
472
- handle_user_input,
473
- inputs=[msg, chatbot], # Pass current message and full history
474
- outputs=[chatbot], # Update chatbot with user's message
475
- queue=False # Process user input quickly
476
  ).then(
477
- process_bot_response,
478
- inputs=[
479
- chatbot, # Full history including the latest user message
480
- system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
481
- frequency_penalty_slider, seed_slider, provider_radio, byok_textbox,
482
- custom_model_box, model_search_box, featured_model_radio
483
- ],
484
- outputs=[chatbot] # Stream bot's response to chatbot
485
  ).then(
486
- lambda: gr.update(value=None), # Clear MultimodalTextbox (text and files)
487
- None, # No inputs
488
- [msg], # Target component to clear
489
- queue=False
490
  )
491
 
492
- def filter_models_choices(search_term):
493
- # print(f"Filtering models with: '{search_term}'")
494
- if not search_term: return gr.update(choices=models_list)
495
- filtered = [m for m in models_list if search_term.lower() in m.lower()]
496
- # print(f"Filtered models: {filtered}")
497
- return gr.update(choices=filtered if filtered else [])
 
498
 
499
- model_search_box.change(fn=filter_models_choices, inputs=model_search_box, outputs=featured_model_radio)
500
 
501
- # When a featured model is selected, it could optionally update the custom_model_box.
502
- # For now, custom_model_box is an override. If empty, featured_model_radio is used by `respond`.
503
- # No direct link needed unless you want radio to populate custom_model_box.
504
-
505
- # Provider validation (simplified, as InferenceClient handles token logic)
506
- byok_textbox.change(fn=validate_provider, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
507
- provider_radio.change(fn=validate_provider, inputs=[byok_textbox, provider_radio], outputs=provider_radio)
508
 
509
- print("Gradio UI defined. Initializing...")
510
 
 
511
 
512
  if __name__ == "__main__":
513
- print("Launching Gradio demo...")
514
- demo.launch(show_api=True, debug=True) # Enable debug for more Gradio logs
515
- print("Gradio demo launched.")
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
+ import json
5
  import base64
6
  from PIL import Image
7
  import io
8
 
9
  ACCESS_TOKEN = os.getenv("HF_TOKEN")
10
+ print("Access token loaded.")
11
 
12
  # Function to encode image to base64
13
+ def encode_image(image_path):
14
+ if not image_path:
15
+ print("No image path provided")
16
  return None
17
 
18
  try:
19
+ print(f"Encoding image from path: {image_path}")
20
 
21
+ # If it's already a PIL Image
22
+ if isinstance(image_path, Image.Image):
23
+ image = image_path
24
  else:
25
+ # Try to open the image file
26
+ image = Image.open(image_path)
27
 
28
+ # Convert to RGB if image has an alpha channel (RGBA)
29
  if image.mode == 'RGBA':
 
30
  image = image.convert('RGB')
31
 
32
+ # Encode to base64
33
  buffered = io.BytesIO()
34
  image.save(buffered, format="JPEG")
35
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
36
+ print("Image encoded successfully")
37
  return img_str
38
  except Exception as e:
39
  print(f"Error encoding image: {e}")
 
41
 
42
  def respond(
43
  message,
44
+ image_files, # Changed parameter name and structure
45
  history: list[tuple[str, str]],
46
  system_message,
47
  max_tokens,
 
50
  frequency_penalty,
51
  seed,
52
  provider,
53
+ custom_api_key,
54
  custom_model,
55
  model_search_term,
56
  selected_model
57
  ):
58
+ print(f"Received message: {message}")
59
+ print(f"Received {len(image_files) if image_files else 0} images")
60
+ print(f"History: {history}")
61
+ print(f"System message: {system_message}")
62
+ print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
63
+ print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
64
+ print(f"Selected provider: {provider}")
65
+ print(f"Custom API Key provided: {bool(custom_api_key.strip())}")
66
+ print(f"Selected model (custom_model): {custom_model}")
67
+ print(f"Model search term: {model_search_term}")
68
+ print(f"Selected model from radio: {selected_model}")
69
+
70
+ # Determine which token to use
71
+ token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
72
 
73
+ if custom_api_key.strip() != "":
74
+ print("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
75
  else:
76
+ print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
 
78
  # Initialize the Inference Client with the provider and appropriate token
79
  client = InferenceClient(token=token_to_use, provider=provider)
80
+ print(f"Hugging Face Inference Client initialized with {provider} provider.")
81
 
82
+ # Convert seed to None if -1 (meaning random)
83
+ if seed == -1:
84
  seed = None
85
 
86
+ # Create multimodal content if images are present
87
  if image_files and len(image_files) > 0:
88
+ # Process the user message to include images
89
+ user_content = []
90
+
91
+ # Add text part if there is any
92
+ if message and message.strip():
93
+ user_content.append({
94
+ "type": "text",
95
+ "text": message
96
+ })
97
+
98
+ # Add image parts
99
+ for img in image_files:
100
+ if img is not None:
101
+ # Get raw image data from path
102
+ try:
103
+ encoded_image = encode_image(img)
104
+ if encoded_image:
105
+ user_content.append({
106
+ "type": "image_url",
107
+ "image_url": {
108
+ "url": f"data:image/jpeg;base64,{encoded_image}"
109
+ }
110
+ })
111
+ except Exception as e:
112
+ print(f"Error encoding image: {e}")
113
  else:
114
+ # Text-only message
115
+ user_content = message
116
 
117
+ # Prepare messages in the format expected by the API
118
  messages = [{"role": "system", "content": system_message}]
119
+ print("Initial messages array constructed.")
120
+
121
+ # Add conversation history to the context
122
+ for val in history:
123
+ user_part = val[0]
124
+ assistant_part = val[1]
125
+ if user_part:
126
+ # Handle both text-only and multimodal messages in history
127
+ if isinstance(user_part, tuple) and len(user_part) == 2:
128
+ # This is a multimodal message with text and images
129
+ history_content = []
130
+ if user_part[0]: # Text
131
+ history_content.append({
132
+ "type": "text",
133
+ "text": user_part[0]
134
+ })
135
+
136
+ for img in user_part[1]: # Images
137
+ if img:
138
+ try:
139
+ encoded_img = encode_image(img)
140
+ if encoded_img:
141
+ history_content.append({
142
+ "type": "image_url",
143
+ "image_url": {
144
+ "url": f"data:image/jpeg;base64,{encoded_img}"
145
+ }
146
+ })
147
+ except Exception as e:
148
+ print(f"Error encoding history image: {e}")
149
+
150
+ messages.append({"role": "user", "content": history_content})
151
  else:
152
+ # Regular text message
153
+ messages.append({"role": "user", "content": user_part})
154
+ print(f"Added user message to context (type: {type(user_part)})")
155
+
156
+ if assistant_part:
157
+ messages.append({"role": "assistant", "content": assistant_part})
158
+ print(f"Added assistant message to context: {assistant_part}")
159
 
160
+ # Append the latest user message
161
+ messages.append({"role": "user", "content": user_content})
162
+ print(f"Latest user message appended (content type: {type(user_content)})")
163
 
164
+ # Determine which model to use, prioritizing custom_model if provided
165
  model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
166
+ print(f"Model selected for inference: {model_to_use}")
167
 
168
+ # Start with an empty string to build the response as tokens stream in
169
+ response = ""
170
+ print(f"Sending request to {provider} provider.")
171
 
172
+ # Prepare parameters for the chat completion request
173
  parameters = {
174
  "max_tokens": max_tokens,
175
  "temperature": temperature,
176
  "top_p": top_p,
177
  "frequency_penalty": frequency_penalty,
178
  }
179
+
180
  if seed is not None:
181
  parameters["seed"] = seed
182
 
183
+ # Use the InferenceClient for making the request
184
  try:
185
+ # Create a generator for the streaming response
186
  stream = client.chat_completion(
187
  model=model_to_use,
188
  messages=messages,
 
190
  **parameters
191
  )
192
 
193
+ print("Received tokens: ", end="", flush=True)
194
+
195
+ # Process the streaming response
196
  for chunk in stream:
197
  if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
198
+ # Extract the content from the response
199
+ if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
200
+ token_text = chunk.choices[0].delta.content
201
+ if token_text:
202
+ print(token_text, end="", flush=True)
203
+ response += token_text
204
+ yield response
205
+
206
+ print()
207
  except Exception as e:
208
+ print(f"Error during inference: {e}")
209
+ response += f"\nError: {str(e)}"
210
+ yield response
211
 
212
+ print("Completed response generation.")
213
 
214
  # Function to validate provider selection based on BYOK
215
+ def validate_provider(api_key, provider):
216
+ if not api_key.strip() and provider != "hf-inference":
217
+ return gr.update(value="hf-inference")
218
+ return gr.update(value=provider)
219
 
220
  # GRADIO UI
221
  with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
222
+ # Create the chatbot component
223
  chatbot = gr.Chatbot(
224
  height=600,
225
  show_copy_button=True,
226
+ placeholder="Select a model and begin chatting. Now supports multiple inference providers and multimodal inputs",
227
+ layout="panel"
 
228
  )
229
+ print("Chatbot interface created.")
230
 
231
+ # Multimodal textbox for messages (combines text and file uploads)
232
  msg = gr.MultimodalTextbox(
233
  placeholder="Type a message or upload images...",
234
  show_label=False,
235
  container=False,
236
+ scale=12,
237
  file_types=["image"],
238
+ file_count="multiple",
239
+ sources=["upload"]
240
  )
241
 
242
+ # Note: We're removing the separate submit button since MultimodalTextbox has its own
243
+
244
+ # Create accordion for settings
245
  with gr.Accordion("Settings", open=False):
246
+ # System message
247
  system_message_box = gr.Textbox(
248
  value="You are a helpful AI assistant that can understand images and text.",
249
  placeholder="You are a helpful assistant.",
250
  label="System Prompt"
251
  )
252
 
253
+ # Generation parameters
254
  with gr.Row():
255
  with gr.Column():
256
+ max_tokens_slider = gr.Slider(
257
+ minimum=1,
258
+ maximum=4096,
259
+ value=512,
260
+ step=1,
261
+ label="Max tokens"
262
+ )
263
+
264
+ temperature_slider = gr.Slider(
265
+ minimum=0.1,
266
+ maximum=4.0,
267
+ value=0.7,
268
+ step=0.1,
269
+ label="Temperature"
270
+ )
271
+
272
+ top_p_slider = gr.Slider(
273
+ minimum=0.1,
274
+ maximum=1.0,
275
+ value=0.95,
276
+ step=0.05,
277
+ label="Top-P"
278
+ )
279
+
280
  with gr.Column():
281
+ frequency_penalty_slider = gr.Slider(
282
+ minimum=-2.0,
283
+ maximum=2.0,
284
+ value=0.0,
285
+ step=0.1,
286
+ label="Frequency Penalty"
287
+ )
288
+
289
+ seed_slider = gr.Slider(
290
+ minimum=-1,
291
+ maximum=65535,
292
+ value=-1,
293
+ step=1,
294
+ label="Seed (-1 for random)"
295
+ )
296
+
297
+ # Provider selection
298
+ providers_list = [
299
+ "hf-inference", # Default Hugging Face Inference
300
+ "cerebras", # Cerebras provider
301
+ "together", # Together AI
302
+ "sambanova", # SambaNova
303
+ "novita", # Novita AI
304
+ "cohere", # Cohere
305
+ "fireworks-ai", # Fireworks AI
306
+ "hyperbolic", # Hyperbolic
307
+ "nebius", # Nebius
308
+ ]
309
 
310
+ provider_radio = gr.Radio(
311
+ choices=providers_list,
312
+ value="hf-inference",
313
+ label="Inference Provider",
314
+ )
315
 
316
+ # New BYOK textbox
317
  byok_textbox = gr.Textbox(
318
+ value="",
319
+ label="BYOK (Bring Your Own Key)",
320
+ info="Enter a custom Hugging Face API key here. When empty, only 'hf-inference' provider can be used.",
321
+ placeholder="Enter your Hugging Face API token",
322
+ type="password" # Hide the API key for security
323
  )
324
 
325
+ # Custom model box
326
  custom_model_box = gr.Textbox(
327
+ value="",
328
+ label="Custom Model",
329
+ info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
330
+ placeholder="meta-llama/Llama-3.3-70B-Instruct"
331
  )
332
 
333
+ # Model search
334
+ model_search_box = gr.Textbox(
335
+ label="Filter Models",
336
+ placeholder="Search for a featured model...",
337
+ lines=1
338
+ )
339
 
340
+ # Featured models list
341
+ # Updated to include multimodal models
342
  models_list = [
343
+ "meta-llama/Llama-3.2-11B-Vision-Instruct",
344
+ "meta-llama/Llama-3.3-70B-Instruct",
345
+ "meta-llama/Llama-3.1-70B-Instruct",
346
+ "meta-llama/Llama-3.0-70B-Instruct",
347
+ "meta-llama/Llama-3.2-3B-Instruct",
348
+ "meta-llama/Llama-3.2-1B-Instruct",
349
+ "meta-llama/Llama-3.1-8B-Instruct",
350
+ "NousResearch/Hermes-3-Llama-3.1-8B",
351
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
352
+ "mistralai/Mistral-Nemo-Instruct-2407",
353
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
354
+ "mistralai/Mistral-7B-Instruct-v0.3",
355
+ "mistralai/Mistral-7B-Instruct-v0.2",
356
+ "Qwen/Qwen3-235B-A22B",
357
+ "Qwen/Qwen3-32B",
358
+ "Qwen/Qwen2.5-72B-Instruct",
359
+ "Qwen/Qwen2.5-3B-Instruct",
360
+ "Qwen/Qwen2.5-0.5B-Instruct",
361
+ "Qwen/QwQ-32B",
362
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
363
+ "microsoft/Phi-3.5-mini-instruct",
364
+ "microsoft/Phi-3-mini-128k-instruct",
365
+ "microsoft/Phi-3-mini-4k-instruct",
366
  ]
367
+
368
  featured_model_radio = gr.Radio(
369
+ label="Select a model below",
370
+ choices=models_list,
371
+ value="meta-llama/Llama-3.2-11B-Vision-Instruct", # Default to a multimodal model
372
+ interactive=True
373
  )
374
+
375
+ gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")
376
 
377
+ # Chat history state
378
+ chat_history = gr.State([])
379
+
380
+ # Function to filter models
381
+ def filter_models(search_term):
382
+ print(f"Filtering models with search term: {search_term}")
383
+ filtered = [m for m in models_list if search_term.lower() in m.lower()]
384
+ print(f"Filtered models: {filtered}")
385
+ return gr.update(choices=filtered)
386
+
387
+ # Function to set custom model from radio
388
+ def set_custom_model_from_radio(selected):
389
+ print(f"Featured model selected: {selected}")
390
+ return selected
391
+
392
+ # Function for the chat interface
393
+ def user(user_message, history):
394
+ # Debug logging for troubleshooting
395
+ print(f"User message received: {user_message}")
396
 
397
+ # Skip if message is empty (no text and no files)
398
+ if not user_message or (not user_message.get("text") and not user_message.get("files")):
399
+ print("Empty message, skipping")
400
+ return history
401
 
402
+ # Prepare multimodal message format
403
+ text_content = user_message.get("text", "").strip()
404
+ files = user_message.get("files", [])
405
 
406
+ print(f"Text content: {text_content}")
407
+ print(f"Files: {files}")
 
408
 
409
+ # If both text and files are empty, skip
410
+ if not text_content and not files:
411
+ print("No content to display")
412
+ return history
413
 
414
+ # Add message with images to history
415
+ if files and len(files) > 0:
416
+ # Add text message first if it exists
417
+ if text_content:
418
+ # Add a separate text message
419
+ print(f"Adding text message: {text_content}")
420
+ history.append([text_content, None])
421
 
422
+ # Then add each image file separately
423
+ for file_path in files:
424
+ if file_path and isinstance(file_path, str):
425
+ print(f"Adding image: {file_path}")
426
+ # Add image as a separate message with no text
427
+ history.append([f"![Image]({file_path})", None])
428
+
429
+ return history
430
+ else:
431
+ # For text-only messages
432
+ print(f"Adding text-only message: {text_content}")
433
+ history.append([text_content, None])
434
+ return history
435
+
436
+ # Define bot response function
437
+ def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
438
+ # Check if history is valid
439
+ if not history or len(history) == 0:
440
+ print("No history to process")
441
+ return history
442
 
443
+ # Get the most recent message and detect if it's an image
444
+ user_message = history[-1][0]
445
+ print(f"Processing user message: {user_message}")
446
 
447
+ is_image = False
448
+ image_path = None
449
+ text_content = user_message
450
 
451
+ # Check if this is an image message (marked with ![Image])
452
+ if isinstance(user_message, str) and user_message.startswith("![Image]("):
453
+ is_image = True
454
+ # Extract image path from markdown format ![Image](path)
455
+ image_path = user_message.replace("![Image](", "").replace(")", "")
456
+ print(f"Image detected: {image_path}")
457
+ text_content = "" # No text for image-only messages
458
 
459
+ # Look back for text context if this is an image
460
+ text_context = ""
461
+ if is_image and len(history) > 1:
462
+ # Use the previous message as context if it's text
463
+ prev_message = history[-2][0]
464
+ if isinstance(prev_message, str) and not prev_message.startswith("![Image]("):
465
+ text_context = prev_message
466
+ print(f"Using text context from previous message: {text_context}")
467
 
468
+ # Process message through respond function
469
+ history[-1][1] = ""
470
 
471
+ # Use either the image or text for the API
472
+ if is_image:
473
+ # For image messages
474
+ for response in respond(
475
+ text_context, # Text context from previous message if any
476
+ [image_path], # Current image
477
+ history[:-1], # Previous history
478
+ system_msg,
479
+ max_tokens,
480
+ temperature,
481
+ top_p,
482
+ freq_penalty,
483
+ seed,
484
+ provider,
485
+ api_key,
486
+ custom_model,
487
+ search_term,
488
+ selected_model
489
+ ):
490
+ history[-1][1] = response
491
+ yield history
492
+ else:
493
+ # For text-only messages
494
+ for response in respond(
495
+ text_content, # Text message
496
+ None, # No image
497
+ history[:-1], # Previous history
498
+ system_msg,
499
+ max_tokens,
500
+ temperature,
501
+ top_p,
502
+ freq_penalty,
503
+ seed,
504
+ provider,
505
+ api_key,
506
+ custom_model,
507
+ search_term,
508
+ selected_model
509
+ ):
510
+ history[-1][1] = response
511
+ yield history
512
+
513
+ # Event handlers - only using the MultimodalTextbox's built-in submit functionality
514
+ msg.submit(
515
+ user,
516
+ [msg, chatbot],
517
+ [chatbot],
518
+ queue=False
519
  ).then(
520
+ bot,
521
+ [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
522
+ frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
523
+ model_search_box, featured_model_radio],
524
+ [chatbot]
525
  ).then(
526
+ lambda: {"text": "", "files": []}, # Clear inputs after submission
527
+ None,
528
+ [msg]
 
529
  )
530
 
531
+ # Connect the model filter to update the radio choices
532
+ model_search_box.change(
533
+ fn=filter_models,
534
+ inputs=model_search_box,
535
+ outputs=featured_model_radio
536
+ )
537
+ print("Model search box change event linked.")
538
 
539
+ # Connect the featured model radio to update the custom model box
540
+ featured_model_radio.change(
541
+ fn=set_custom_model_from_radio,
542
+ inputs=featured_model_radio,
543
+ outputs=custom_model_box
544
+ )
545
+ print("Featured model radio button change event linked.")
546
 
547
+ # Connect the BYOK textbox to validate provider selection
548
+ byok_textbox.change(
549
+ fn=validate_provider,
550
+ inputs=[byok_textbox, provider_radio],
551
+ outputs=provider_radio
552
+ )
553
+ print("BYOK textbox change event linked.")
554
 
555
+ # Also validate provider when the radio changes to ensure consistency
556
+ provider_radio.change(
557
+ fn=validate_provider,
558
+ inputs=[byok_textbox, provider_radio],
559
+ outputs=provider_radio
560
+ )
561
+ print("Provider radio button change event linked.")
562
 
563
+ print("Gradio interface initialized.")
564
 
565
  if __name__ == "__main__":
566
+ print("Launching the demo application.")
567
+ demo.launch(show_api=True)