sflindrs committed
Commit f1a5355 · verified · 1 Parent(s): 752ac37

Update app.py

Files changed (1):
  1. app.py +11 -10
app.py CHANGED
@@ -7,16 +7,17 @@ multiprocessing.set_start_method("spawn", force=True)
 
 # --- Trending models for image text-to-text tasks ---
 TRENDING_MODELS = [
-    "Salesforce/blip2-opt-2.7b",
-    "Salesforce/blip2-flan-t5-xl",
-    "Salesforce/blip-image-captioning-base",
-    "Salesforce/blip-image-captioning-large",
-    "nlpconnect/vit-gpt2-image-captioning",
-    "OFA-Sys/OFA-base",
-    "OFA-Sys/OFA-large",
-    "dandelin/vilt-b32-finetuned-vqa",
-    "dandelin/vilt-b32-mlm",
-    "uclanlp/visualbert-vqa-coco-pre"
+    "Salesforce/blip2-opt-2.7b",             # Uses Blip2Config
+    "Salesforce/blip2-flan-t5-xl",           # Uses Blip2Config
+    "Salesforce/instructblip-vicuna-7b",     # Uses InstructBlipConfig
+    "LLaVA/LLaVA-7B",                        # Uses LlavaConfig
+    "LLaVA/LLaVA-13B",                       # Uses LlavaConfig
+    "LlavaNext/LlavaNext-7B",                # Uses LlavaNextConfig
+    "Qwen2-VL-7B",                           # Uses Qwen2VLConfig
+    "google/pix2struct-base",                # Uses Pix2StructConfig
+    "nlpconnect/vit-gpt2-image-captioning",  # Uses VisionEncoderDecoderConfig
+    "LlavaOneVision/LlavaOneVision-7B",      # Uses LlavaOnevisionConfig
+    "MosaicML/mllama-7b"                     # Uses MllamaConfig
 ]
 
 # --- Helper: if the user selects "Custom", then they can enter any model identifier ---
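Context for the new inline comments: each entry now notes which transformers config class the model identifier is expected to resolve to. A minimal sketch (not part of this commit; the model id shown is just the first list entry) of checking that mapping with AutoConfig:

    from transformers import AutoConfig

    # Resolve a Hub model id to its config object; the class name should
    # match the comment next to that entry in TRENDING_MODELS.
    model_id = "Salesforce/blip2-opt-2.7b"
    config = AutoConfig.from_pretrained(model_id)
    print(type(config).__name__)  # expected: "Blip2Config"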