Update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ import chromadb
 
 app = FastAPI()
 client = chromadb.PersistentClient(path="/data/chroma_db")
-collection = client.get_or_create_collection(name="knowledge_base"
+collection = client.get_or_create_collection(name="knowledge_base")
 
 print("Created collection with 512 dimensions!")
 
@@ -24,7 +24,7 @@ collection = client.get_collection(name="knowledge_base")
 print("Collection Embedding Dimension:", collection.metadata)
 
 # Initialize models
-text_model = SentenceTransformer('all-MiniLM-
+text_model = SentenceTransformer('sentence-transformers/all-MiniLM-L12-v2')
 clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
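
For context on the first hunk: a minimal sketch, assuming nothing beyond the lines shown, of how the persistent client and collection might be exercised. The id, the placeholder vector, and the add call are hypothetical; only the client and collection setup comes from the commit.

import chromadb

# Persistent storage at the same path the app uses; data survives restarts.
client = chromadb.PersistentClient(path="/data/chroma_db")

# get_or_create_collection is idempotent: it returns the existing
# "knowledge_base" collection or creates it on first run.
collection = client.get_or_create_collection(name="knowledge_base")

# A Chroma collection's dimensionality is fixed by the first embedding
# added; the app's print statement suggests 512-dim vectors are intended.
collection.add(
    ids=["example-1"],            # hypothetical id, not from the commit
    embeddings=[[0.0] * 512],     # placeholder 512-dim vector
    documents=["placeholder document"],
)
print(collection.count())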
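And for the second hunk: a sketch of how the three models initialized there are typically driven. The sample inputs are assumptions. Note that all-MiniLM-L12-v2 returns 384-dimensional sentence vectors, while CLIP ViT-B/32 projects both text and images into a shared 512-dimensional space, so the two model families cannot feed the same Chroma collection.

from PIL import Image
import torch
from sentence_transformers import SentenceTransformer
from transformers import CLIPModel, CLIPProcessor

text_model = SentenceTransformer('sentence-transformers/all-MiniLM-L12-v2')
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# 384-dim sentence embedding for text-only similarity search.
sentence_vec = text_model.encode("an example sentence")  # shape (384,)

# 512-dim CLIP embeddings: text and images land in the same space,
# which is what enables cross-modal (text-to-image) retrieval.
inputs = clip_processor(text=["an example caption"],
                        images=Image.new("RGB", (224, 224)),
                        return_tensors="pt", padding=True)
with torch.no_grad():
    text_vec = clip_model.get_text_features(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"])
    image_vec = clip_model.get_image_features(
        pixel_values=inputs["pixel_values"])

print(sentence_vec.shape, text_vec.shape, image_vec.shape)
# (384,) torch.Size([1, 512]) torch.Size([1, 512])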