Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -90,42 +90,45 @@ def extract_images_from_pptx(pptx_path):
|
|
def get_text_embedding(text):
    """Encode *text* with the sentence-transformer model and return it as a plain list."""
    embedding = text_model.encode(text)
    return embedding.tolist()
# Extract Image Embeddings
def get_image_embedding(image_path):
    """Run the image at *image_path* through the CLIP model and return its
    flattened feature vector as a list.

    Returns None (after printing the error) if loading or inference fails.
    """
    try:
        img = Image.open(image_path)
        model_inputs = processor(images=img, return_tensors="pt")
        with torch.no_grad():
            features = model.get_image_features(**model_inputs).numpy().flatten()
        return features.tolist()
    except Exception as e:
        print(f"Error generating image embedding: {e}")
        return None
# Reduce Embedding Dimensions (If Needed)
def reduce_embedding_dim(embeddings):
    """Project a batch of embeddings down to at most 384 dimensions with PCA.

    Args:
        embeddings: sequence of equal-length embedding vectors
            (list of lists or ndarray), shape (n_samples, n_features).

    Returns:
        list: the PCA-reduced embeddings as nested lists, or the untouched
        input (as a list) if the transformation fails.
    """
    try:
        arr = np.array(embeddings)
        # PCA requires n_components <= min(n_samples, n_features).
        n_components = min(arr.shape[0], arr.shape[1], 384)
        pca = PCA(n_components=n_components)
        return pca.fit_transform(arr).tolist()
    except Exception as e:
        print(f"Error in PCA transformation: {e}")
        # Bug fix: the original fallback called .tolist() on the raw argument,
        # which raises AttributeError when the caller passed a plain Python
        # list — defeating the "return original embeddings" intent.
        return embeddings.tolist() if isinstance(embeddings, np.ndarray) else list(embeddings)
116 |
# Store Data in ChromaDB
|
117 |
def store_data(texts, image_paths):
|
118 |
for i, text in enumerate(texts):
|
119 |
if text:
|
120 |
-
|
121 |
-
|
|
|
|
|
122 |
all_embeddings = [get_image_embedding(img_path) for img_path in image_paths if get_image_embedding(img_path) is not None]
|
|
|
123 |
if all_embeddings:
|
124 |
all_embeddings = np.array(all_embeddings)
|
125 |
-
|
|
|
|
|
|
|
|
|
|
|
126 |
for j, img_path in enumerate(image_paths):
|
127 |
-
collection.add(ids=[f"image_{j}"], embeddings=[
|
128 |
-
|
129 |
print("Data stored successfully!")
|
130 |
|
131 |
# Process and Store from Files
|
|
|
def get_text_embedding(text):
    """Encode *text* with the sentence-transformer model and return it as a plain list."""
    embedding = text_model.encode(text)
    return embedding.tolist()
# Extract Image Embeddings and Reduce to 384 Dimensions
def get_image_embedding(image_path):
    """Return a 384-dimensional embedding for the image at *image_path*.

    Opens the image, runs it through the CLIP processor/model, then forces the
    resulting feature vector to exactly 384 dimensions.

    Returns:
        list[float] of length 384, or None (after printing the error) if
        loading or inference fails.
    """
    try:
        image = Image.open(image_path)
        inputs = processor(images=image, return_tensors="pt")
        with torch.no_grad():
            image_embedding = model.get_image_features(**inputs).numpy().flatten()

        # Bug fix: the original ran PCA(n_components=384) on a single sample
        # via .reshape(1, -1). PCA requires n_components <= n_samples (here 1),
        # so fit_transform ALWAYS raised, the except clause fired, and every
        # image embedding came back as None. A lone vector cannot be
        # PCA-reduced; truncate or zero-pad deterministically instead.
        if image_embedding.shape[0] > 384:
            image_embedding = image_embedding[:384]
        elif image_embedding.shape[0] < 384:
            image_embedding = np.pad(image_embedding, (0, 384 - image_embedding.shape[0]))

        return image_embedding.tolist()
    except Exception as e:
        print(f"Error generating image embedding: {e}")
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Store Data in ChromaDB
def store_data(texts, image_paths):
    """Embed the given texts and images and add them to the ChromaDB collection.

    Texts whose embedding is not 384-dim are skipped (the collection expects
    384-dim vectors). Images that fail to embed are skipped, and the stored
    ids/documents stay aligned with the paths that actually produced an
    embedding.
    """
    for i, text in enumerate(texts):
        if text:
            text_embedding = get_text_embedding(text)
            if len(text_embedding) == 384:
                collection.add(ids=[f"text_{i}"], embeddings=[text_embedding], documents=[text])

    # Bug fix: the original comprehension invoked get_image_embedding() TWICE
    # per path (once in the filter, once for the value), and then indexed the
    # filtered embedding list with the UNfiltered image_paths index — so any
    # failed image mispaired every subsequent path with the wrong embedding.
    # Compute each embedding once and keep path/embedding pairs aligned.
    embedded_paths = []
    image_embeddings = []
    for img_path in image_paths:
        embedding = get_image_embedding(img_path)
        if embedding is not None:
            embedded_paths.append(img_path)
            image_embeddings.append(embedding)

    if image_embeddings:
        all_embeddings = np.array(image_embeddings)

        # Apply PCA only when it can actually yield 384 components:
        # fit_transform raises unless n_samples >= n_components (384).
        if all_embeddings.shape[1] != 384 and all_embeddings.shape[0] >= 384:
            pca = PCA(n_components=384)
            all_embeddings = pca.fit_transform(all_embeddings)

        for j, img_path in enumerate(embedded_paths):
            collection.add(ids=[f"image_{j}"], embeddings=[all_embeddings[j].tolist()], documents=[img_path])

    print("Data stored successfully!")
|
134 |
# Process and Store from Files
|