cstr committed · Commit 7050196 · verified · 1 Parent(s): 453c62c

Update app.py

Files changed (1)
  1. app.py +81 -82
app.py CHANGED
@@ -164,15 +164,14 @@ def encode_image_to_base64(image_path):
             elif file_extension == "webp":
                 mime_type = "image/webp"
             return f"data:{mime_type};base64,{encoded_string}"
-        else: # Pillow Image or file-like object
-            if Image is not None:
-                buffered = io.BytesIO()
-                image_path.save(buffered, format="PNG")
-                encoded_string = base64.b64encode(buffered.getvalue()).decode('utf-8')
-                return f"data:image/png;base64,{encoded_string}"
-            else:
-                logger.error("PIL is not installed, cannot process image object")
-                return None
+        elif Image is not None and hasattr(image_path, 'save'): # Pillow Image
+            buffered = io.BytesIO()
+            image_path.save(buffered, format="PNG")
+            encoded_string = base64.b64encode(buffered.getvalue()).decode('utf-8')
+            return f"data:image/png;base64,{encoded_string}"
+        else: # Handle file object or other types
+            logger.error(f"Unsupported image type: {type(image_path)}")
+            return None
     except Exception as e:
         logger.error(f"Error encoding image: {str(e)}")
         return None
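The reworked branch above only treats objects that expose a save() method as Pillow images and logs an error for anything else. For reference, here is a minimal standalone sketch of the same PNG data-URI encoding path; the helper name to_png_data_uri is illustrative and not part of app.py, and it assumes Pillow is installed.

# Illustrative sketch of the PNG data-URI path shown in the hunk above.
import base64
import io

from PIL import Image  # assumes Pillow is available

def to_png_data_uri(img: Image.Image) -> str:
    """Encode a Pillow image as a base64 PNG data URI."""
    buffered = io.BytesIO()
    img.save(buffered, format="PNG")
    encoded = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return f"data:image/png;base64,{encoded}"

if __name__ == "__main__":
    sample = Image.new("RGB", (8, 8), color="red")
    print(to_png_data_uri(sample)[:30])  # prints the "data:image/png;base64,..." prefix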
@@ -258,40 +257,13 @@ def prepare_message_with_media(text, images=None, documents=None):
 
     return content
 
-def filter_models(search_term):
-    """Filter models based on search term"""
-    if not search_term:
-        return gr.Dropdown.update(choices=[model[0] for model in ALL_MODELS], value=ALL_MODELS[0][0])
-
-    filtered_models = [model[0] for model in ALL_MODELS if search_term.lower() in model[0].lower()]
-
-    if filtered_models:
-        return gr.Dropdown.update(choices=filtered_models, value=filtered_models[0])
-    else:
-        return gr.Dropdown.update(choices=[model[0] for model in ALL_MODELS], value=ALL_MODELS[0][0])
-
-def get_model_info(model_name):
-    """Get model information by name"""
-    for model in ALL_MODELS:
-        if model[0] == model_name:
-            return model
-    return None
-
-def update_context_display(model_name):
-    """Update the context size display based on the selected model"""
-    model_info = get_model_info(model_name)
-    if model_info:
-        name, model_id, context_size = model_info
-        context_formatted = f"{context_size:,}"
-        return f"{context_formatted} tokens"
-    return "Unknown"
-
-def update_category_models(category):
-    """Update models list when category changes"""
-    for cat in MODELS:
-        if cat["category"] == category:
-            return gr.Radio.update(choices=[model[0] for model in cat["models"]], value=cat["models"][0][0])
-    return gr.Radio.update(choices=[], value=None)
+def process_uploaded_images(files):
+    """Process uploaded image files - fixed for Gradio 4.44.1"""
+    file_paths = []
+    for file in files:
+        if hasattr(file, 'name'):
+            file_paths.append(file.name)
+    return file_paths
 
 def ask_ai(message, chatbot, model_choice, temperature, max_tokens, top_p,
            frequency_penalty, presence_penalty, repetition_penalty, top_k,
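The new process_uploaded_images keeps only entries that expose a .name attribute. Depending on the Gradio version and upload settings, callbacks may instead receive plain path strings, so a slightly more tolerant normalizer is sometimes useful. The sketch below is hypothetical (normalize_upload_paths is not part of this commit) and only illustrates that defensive pattern.

# Hypothetical, more defensive variant of the upload handler above.
from typing import Any, List

def normalize_upload_paths(files: List[Any]) -> List[str]:
    """Return file paths from either path strings or file-like objects."""
    paths = []
    for f in files or []:
        if isinstance(f, str):       # some upload modes pass plain paths
            paths.append(f)
        elif hasattr(f, "name"):     # others pass tempfile-style wrappers
            paths.append(f.name)
    return paths

print(normalize_upload_paths(["/tmp/a.png"]))  # ['/tmp/a.png']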
@@ -428,10 +400,6 @@ def ask_ai(message, chatbot, model_choice, temperature, max_tokens, top_p,
 
     return chatbot, ""
 
-def process_uploaded_images(files):
-    """Process uploaded image files"""
-    return [file.name for file in files]
-
 def clear_chat():
     """Reset all inputs"""
     return [], "", [], [], 0.7, 1000, 0.8, 0.0, 0.0, 1.0, 40, 0.1, 0, 0.0, False, "default", "none", "", []
@@ -465,7 +433,7 @@ def create_app():
     }
     """) as demo:
         gr.Markdown("""
-        # CrispChat
+        # Enhanced AI Chat
 
         Chat with various AI models from OpenRouter with support for images and documents.
         """)
@@ -545,15 +513,16 @@ def create_app():
 
             # Model category selection
             with gr.Accordion("Browse by Category", open=False):
-                model_categories = gr.Radio(
+                model_categories = gr.Dropdown(
                     [category["category"] for category in MODELS],
                     label="Categories",
                     value=MODELS[0]["category"]
                 )
 
-                category_models = gr.Radio(
+                category_models = gr.Dropdown(
                     [model[0] for model in MODELS[0]["models"]],
-                    label="Models in Category"
+                    label="Models in Category",
+                    value=MODELS[0]["models"][0][0]
                 )
 
             with gr.Accordion("Generation Parameters", open=False):
@@ -719,60 +688,90 @@ def create_app():
         Built with ❤️ using Gradio and OpenRouter API | Context sizes shown next to model names
         """)
 
+        # Helper function to filter models
+        def filter_models(search_term):
+            if not search_term:
+                return [model[0] for model in ALL_MODELS], ALL_MODELS[0][0]
+
+            filtered_models = [model[0] for model in ALL_MODELS if search_term.lower() in model[0].lower()]
+
+            if filtered_models:
+                return filtered_models, filtered_models[0]
+            else:
+                return [model[0] for model in ALL_MODELS], ALL_MODELS[0][0]
+
+        # Helper function for context display
+        def update_context_display(model_name):
+            for model in ALL_MODELS:
+                if model[0] == model_name:
+                    _, _, context_size = model
+                    context_formatted = f"{context_size:,}"
+                    return f"{context_formatted} tokens"
+            return "Unknown"
+
+        # Helper function for model info display
+        def update_model_info(model_name):
+            for model in ALL_MODELS:
+                if model[0] == model_name:
+                    name, model_id, context_size = model
+                    return f"""
+                    <div class="model-info">
+                        <h3>{name}</h3>
+                        <p><strong>Model ID:</strong> {model_id}</p>
+                        <p><strong>Context Size:</strong> {context_size:,} tokens</p>
+                        <p><strong>Provider:</strong> {model_id.split('/')[0]}</p>
+                    </div>
+                    """
+            return "<p>Model information not available</p>"
+
+        # Helper function to update category models
+        def update_category_models(category):
+            for cat in MODELS:
+                if cat["category"] == category:
+                    model_names = [model[0] for model in cat["models"]]
+                    return model_names, model_names[0]
+            return [], ""
+
         # Connect model search to dropdown filter
         model_search.change(
             fn=filter_models,
-            inputs=[model_search],
-            outputs=[model_choice]
+            inputs=model_search,
+            outputs=[model_choice, model_choice]
         )
 
         # Update context display when model changes
         model_choice.change(
             fn=update_context_display,
-            inputs=[model_choice],
-            outputs=[context_display]
+            inputs=model_choice,
+            outputs=context_display
+        )
+
+        # Update model info when model changes
+        model_choice.change(
+            fn=update_model_info,
+            inputs=model_choice,
+            outputs=model_info_display
         )
 
         # Update model list when category changes
         model_categories.change(
            fn=update_category_models,
-            inputs=[model_categories],
-            outputs=[category_models]
+            inputs=model_categories,
+            outputs=[category_models, category_models]
        )
 
         # Update main model choice when category model is selected
         category_models.change(
             fn=lambda x: x,
-            inputs=[category_models],
-            outputs=[model_choice]
+            inputs=category_models,
+            outputs=model_choice
         )
 
         # Process uploaded images
         image_upload_btn.upload(
             fn=process_uploaded_images,
-            inputs=[image_upload_btn],
-            outputs=[images]
-        )
-
-        # Update model info when model changes
-        def update_model_info(model_name):
-            model_info = get_model_info(model_name)
-            if model_info:
-                name, model_id, context_size = model_info
-                return f"""
-                <div class="model-info">
-                    <h3>{name}</h3>
-                    <p><strong>Model ID:</strong> {model_id}</p>
-                    <p><strong>Context Size:</strong> {context_size:,} tokens</p>
-                    <p><strong>Provider:</strong> {model_id.split('/')[0]}</p>
-                </div>
-                """
-            return "<p>Model information not available</p>"
-
-        model_choice.change(
-            fn=update_model_info,
-            inputs=[model_choice],
-            outputs=[model_info_display]
+            inputs=image_upload_btn,
+            outputs=images
         )
 
         # Set up events for the submit button
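A note on the event wiring above: the commit drops the removed gr.Dropdown.update(...) calls and instead has the helpers return (choices, value) tuples routed to duplicated output components (e.g. outputs=[model_choice, model_choice]). In Gradio 4.x, a common alternative is to return a single gr.update(choices=..., value=...) object to one output component. The following is only a hedged sketch of that alternative with stand-in data (ALL_MODELS here is a placeholder), not the approach taken in this commit.

# Hedged sketch: updating a Dropdown's choices and value with gr.update().
import gradio as gr

ALL_MODELS = [("Model A", "prov/a", 8192), ("Model B", "prov/b", 32768)]  # stand-in data

def filter_models_update(search_term):
    """Return a single update that sets both choices and selected value."""
    names = [m[0] for m in ALL_MODELS]
    hits = [n for n in names if (search_term or "").lower() in n.lower()]
    choices = hits or names
    return gr.update(choices=choices, value=choices[0])

with gr.Blocks() as sketch:
    search = gr.Textbox(label="Search models")
    model_choice = gr.Dropdown(choices=[m[0] for m in ALL_MODELS], label="Model")
    search.change(fn=filter_models_update, inputs=search, outputs=model_choice)

# sketch.launch()  # uncomment to try it locally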