cstr committed on
Commit
5fd37a0
·
verified ·
1 Parent(s): e725dc0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -17
app.py CHANGED
@@ -114,18 +114,27 @@ MODELS = [
114
  ("Hugging Face: Zephyr 7B", "huggingfaceh4/zephyr-7b-beta:free", 4096),
115
  ("MythoMax 13B", "gryphe/mythomax-l2-13b:free", 4096),
116
  ]},
117
-
118
  # Vision-capable Models
119
  {"category": "Vision Models", "models": [
120
- ("Meta: Llama 3.2 11B Vision Instruct", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072),
121
- ("Qwen: Qwen2.5 VL 72B Instruct", "qwen/qwen2.5-vl-72b-instruct:free", 131072),
122
- ("Qwen: Qwen2.5 VL 32B Instruct", "qwen/qwen2.5-vl-32b-instruct:free", 8192),
123
- ("Qwen: Qwen2.5-VL 7B Instruct", "qwen/qwen-2.5-vl-7b-instruct:free", 64000),
124
- ("Qwen: Qwen2.5 VL 3B Instruct", "qwen/qwen2.5-vl-3b-instruct:free", 64000),
125
  ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
126
- ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
127
- ("Google: Gemini 2.0 Flash Thinking Experimental", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
128
  ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
  ("AllenAI: Molmo 7B D", "allenai/molmo-7b-d:free", 4096),
130
  ]},
131
  ]
@@ -349,7 +358,7 @@ def ask_ai(message, chatbot, model_choice, temperature, max_tokens, top_p,
349
  headers={
350
  "Content-Type": "application/json",
351
  "Authorization": f"Bearer {OPENROUTER_API_KEY}",
352
- "HTTP-Referer": "https://huggingface.co/spaces"
353
  },
354
  json=payload,
355
  timeout=180, # Longer timeout for document processing and streaming
@@ -382,13 +391,33 @@ def ask_ai(message, chatbot, model_choice, temperature, max_tokens, top_p,
382
 
383
  elif response.status_code == 200:
384
  # Handle normal response
385
- result = response.json()
386
- ai_response = result.get("choices", [{}])[0].get("message", {}).get("content", "")
387
- chatbot = chatbot + [[message, ai_response]]
388
-
389
- # Log token usage if available
390
- if "usage" in result:
391
- logger.info(f"Token usage: {result['usage']}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
392
  else:
393
  response_text = response.text
394
  logger.info(f"Error response body: {response_text}")
@@ -435,16 +464,29 @@ def update_context_display(model_name):
435
  return "Unknown"
436
 
437
  # Helper function for model info display
 
438
  def update_model_info(model_name):
439
  for model in ALL_MODELS:
440
  if model[0] == model_name:
441
  name, model_id, context_size = model
 
 
 
 
 
 
 
 
 
 
 
442
  return f"""
443
  <div class="model-info">
444
- <h3>{name}</h3>
445
  <p><strong>Model ID:</strong> {model_id}</p>
446
  <p><strong>Context Size:</strong> {context_size:,} tokens</p>
447
  <p><strong>Provider:</strong> {model_id.split('/')[0]}</p>
 
448
  </div>
449
  """
450
  return "<p>Model information not available</p>"
 
114
  ("Hugging Face: Zephyr 7B", "huggingfaceh4/zephyr-7b-beta:free", 4096),
115
  ("MythoMax 13B", "gryphe/mythomax-l2-13b:free", 4096),
116
  ]},
117
+
118
  # Vision-capable Models
119
  {"category": "Vision Models", "models": [
 
 
 
 
 
120
  ("Google: Gemini Pro 2.0 Experimental", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
121
+ ("Google: Gemini 2.0 Flash Thinking Experimental 01-21", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
 
122
  ("Google: Gemini Flash 2.0 Experimental", "google/gemini-2.0-flash-exp:free", 1048576),
123
+ ("Google: Gemini Pro 2.5 Experimental", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
124
+ ("Google: Gemini Flash 1.5 8B Experimental", "google/gemini-flash-1.5-8b-exp", 1000000),
125
+ ("Google: Gemma 3 4B", "google/gemma-3-4b-it:free", 131072),
126
+ ("Google: Gemma 3 12B", "google/gemma-3-12b-it:free", 131072),
127
+ ("Qwen: Qwen2.5 VL 72B Instruct", "qwen/qwen2.5-vl-72b-instruct:free", 131072),
128
+ ("Meta: Llama 3.2 11B Vision Instruct", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072),
129
+ ("Mistral: Mistral Small 3.1 24B", "mistralai/mistral-small-3.1-24b-instruct:free", 96000),
130
+ ("Google: Gemma 3 27B", "google/gemma-3-27b-it:free", 96000),
131
+ ("Qwen: Qwen2.5 VL 3B Instruct", "qwen/qwen2.5-vl-3b-instruct:free", 64000),
132
+ ("Qwen: Qwen2.5-VL 7B Instruct", "qwen/qwen-2.5-vl-7b-instruct:free", 64000),
133
+ ("Google: LearnLM 1.5 Pro Experimental", "google/learnlm-1.5-pro-experimental:free", 40960),
134
+ ("Google: Gemini 2.0 Flash Thinking Experimental", "google/gemini-2.0-flash-thinking-exp-1219:free", 40000),
135
+ ("Bytedance: UI-TARS 72B", "bytedance-research/ui-tars-72b:free", 32768),
136
+ ("Google: Gemma 3 1B", "google/gemma-3-1b-it:free", 32768),
137
+ ("Qwen: Qwen2.5 VL 32B Instruct", "qwen/qwen2.5-vl-32b-instruct:free", 8192),
138
  ("AllenAI: Molmo 7B D", "allenai/molmo-7b-d:free", 4096),
139
  ]},
140
  ]
 
358
  headers={
359
  "Content-Type": "application/json",
360
  "Authorization": f"Bearer {OPENROUTER_API_KEY}",
361
+ "HTTP-Referer": "https://huggingface.co/spaces/cstr/CrispStrobe"
362
  },
363
  json=payload,
364
  timeout=180, # Longer timeout for document processing and streaming
 
391
 
392
  elif response.status_code == 200:
393
  # Handle normal response
394
+ try:
395
+ result = response.json()
396
+ # Add detailed logging to debug what's in the response
397
+ logger.info(f"Response content: {result}")
398
+
399
+ ai_response = ""
400
+ if "choices" in result and len(result["choices"]) > 0:
401
+ if "message" in result["choices"][0]:
402
+ ai_response = result["choices"][0]["message"].get("content", "")
403
+ elif "delta" in result["choices"][0]:
404
+ ai_response = result["choices"][0]["delta"].get("content", "")
405
+ else:
406
+ logger.error(f"Unexpected response structure: {result}")
407
+ ai_response = "Error: Unexpected response structure from API"
408
+ else:
409
+ logger.error(f"No choices in response: {result}")
410
+ ai_response = "Error: No response received from the model"
411
+
412
+ chatbot = chatbot + [[message, ai_response]]
413
+
414
+ # Log token usage if available
415
+ if "usage" in result:
416
+ logger.info(f"Token usage: {result['usage']}")
417
+ except Exception as e:
418
+ logger.error(f"Error processing response: {str(e)}")
419
+ logger.error(f"Response raw text: {response.text}")
420
+ chatbot = chatbot + [[message, f"Error processing response: {str(e)}"]]
421
  else:
422
  response_text = response.text
423
  logger.info(f"Error response body: {response_text}")
 
464
  return "Unknown"
465
 
466
  # Helper function for model info display
467
+ # Update the model info display function to indicate vision capability
468
  def update_model_info(model_name):
469
  for model in ALL_MODELS:
470
  if model[0] == model_name:
471
  name, model_id, context_size = model
472
+
473
+ # Check if this is a vision model
474
+ is_vision_model = False
475
+ for cat in MODELS:
476
+ if cat["category"] == "Vision Models":
477
+ if any(m[0] == model_name for m in cat["models"]):
478
+ is_vision_model = True
479
+ break
480
+
481
+ vision_badge = '<span style="background-color: #4CAF50; color: white; padding: 3px 6px; border-radius: 3px; font-size: 0.8em; margin-left: 5px;">Vision</span>' if is_vision_model else ''
482
+
483
  return f"""
484
  <div class="model-info">
485
+ <h3>{name} {vision_badge}</h3>
486
  <p><strong>Model ID:</strong> {model_id}</p>
487
  <p><strong>Context Size:</strong> {context_size:,} tokens</p>
488
  <p><strong>Provider:</strong> {model_id.split('/')[0]}</p>
489
+ {f'<p><strong>Features:</strong> Supports image understanding</p>' if is_vision_model else ''}
490
  </div>
491
  """
492
  return "<p>Model information not available</p>"