Redmind committed on
Commit
d188171
·
verified ·
1 Parent(s): dc33092

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -25,8 +25,8 @@ print("Collection Embedding Dimension:", collection.metadata)
25
 
26
  # Initialize models
27
  text_model = SentenceTransformer('all-MiniLM-L6-v2')
28
- clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
29
- clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
30
 
31
  # Folder for extracted images
32
  IMAGE_FOLDER = "/data/extracted_images"
 
25
 
26
  # Initialize models
27
  text_model = SentenceTransformer('all-MiniLM-L6-v2')
28
+ clip_model = CLIPModel.from_pretrained("sentence-transformers/clip-ViT-B-32")
29
+ clip_processor = CLIPProcessor.from_pretrained("sentence-transformers/clip-ViT-B-32")
30
 
31
  # Folder for extracted images
32
  IMAGE_FOLDER = "/data/extracted_images"