NEXAS committed on
Commit
186923b
·
verified ·
1 Parent(s): b8fa391

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +132 -120
src/streamlit_app.py CHANGED
@@ -10,12 +10,13 @@ import tempfile
10
  import time
11
 
12
  # ----- Setup -----
 
13
  CACHE_DIR = tempfile.gettempdir()
14
  CHROMA_PATH = os.path.join(CACHE_DIR, "chroma_db")
15
  DEMO_DIR = os.path.join(CACHE_DIR, "demo_images")
16
  os.makedirs(DEMO_DIR, exist_ok=True)
17
 
18
- # ----- Initialize Session State -----
19
  if 'dataset_loaded' not in st.session_state:
20
  st.session_state.dataset_loaded = False
21
  if 'dataset_name' not in st.session_state:
@@ -43,20 +44,18 @@ if 'chroma_client' not in st.session_state:
43
  name="user_images", metadata={"hnsw:space": "cosine"}
44
  )
45
 
46
- # ----- Title -----
47
- st.title("πŸ” CLIP-Based Image Search")
48
-
49
- # ----- Dataset Buttons -----
50
- col1, col2 = st.columns(2)
51
- if col1.button("πŸ“¦ Use Demo Images"):
52
- st.session_state.dataset_name = "demo"
53
- st.session_state.dataset_loaded = False
54
-
55
- if col2.button("πŸ“€ Upload Your Images"):
56
- st.session_state.dataset_name = "user"
57
- st.session_state.dataset_loaded = False
58
-
59
- # ----- Download + Embed Demo Images -----
60
  def download_image_with_retry(url, path, retries=3, delay=1.0):
61
  for attempt in range(retries):
62
  try:
@@ -65,111 +64,124 @@ def download_image_with_retry(url, path, retries=3, delay=1.0):
65
  with open(path, 'wb') as f:
66
  f.write(r.content)
67
  return True
68
- except Exception as e:
69
  time.sleep(delay)
70
  return False
71
 
72
- if st.session_state.dataset_name == "demo" and not st.session_state.dataset_loaded:
73
- with st.spinner("Downloading and indexing demo images..."):
74
- st.session_state.demo_collection.delete(ids=[str(i) for i in range(50)])
75
- demo_image_paths, demo_images = [], []
76
- for i in range(50):
77
- path = os.path.join(DEMO_DIR, f"img_{i+1:02}.jpg")
78
- if not os.path.exists(path):
79
- url = f"https://picsum.photos/seed/{i}/1024/768"
80
- download_image_with_retry(url, path)
81
- try:
82
- demo_images.append(Image.open(path).convert("RGB"))
83
- demo_image_paths.append(path)
84
- except:
85
- continue # skip corrupted
86
-
87
- embeddings, ids, metadatas = [], [], []
88
- for i, img in enumerate(demo_images):
89
- img_tensor = st.session_state.preprocess(img).unsqueeze(0).to(st.session_state.device)
90
- with torch.no_grad():
91
- embedding = st.session_state.model.encode_image(img_tensor).cpu().numpy().flatten()
92
- embeddings.append(embedding)
93
- ids.append(str(i))
94
- metadatas.append({"path": demo_image_paths[i]})
95
-
96
- st.session_state.demo_collection.add(embeddings=embeddings, ids=ids, metadatas=metadatas)
97
- st.session_state.demo_images = demo_images
98
- st.session_state.dataset_loaded = True
99
-
100
- st.success("βœ… Demo images loaded!")
101
-
102
- # ----- Upload User Images -----
103
- if st.session_state.dataset_name == "user" and not st.session_state.dataset_loaded:
104
- uploaded = st.file_uploader("Upload your images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
105
- if uploaded:
106
- st.session_state.user_collection.delete(ids=[
107
- str(i) for i in range(st.session_state.user_collection.count())
108
- ])
109
- user_images = []
110
- for i, file in enumerate(uploaded):
111
- try:
112
- img = Image.open(file).convert("RGB")
113
- except:
114
- continue
115
- user_images.append(img)
116
- img_tensor = st.session_state.preprocess(img).unsqueeze(0).to(st.session_state.device)
117
- with torch.no_grad():
118
- embedding = st.session_state.model.encode_image(img_tensor).cpu().numpy().flatten()
119
- st.session_state.user_collection.add(
120
- embeddings=[embedding], ids=[str(i)], metadatas=[{"index": i}]
121
- )
122
-
123
- st.session_state.user_images = user_images
124
- st.session_state.dataset_loaded = True
125
- st.success(f"βœ… Uploaded {len(user_images)} images.")
126
-
127
- # ----- Search Section -----
128
- if st.session_state.dataset_loaded:
129
- st.subheader("πŸ”Ž Search Section")
130
- query_type = st.radio("Search by:", ("Text", "Image"))
131
-
132
- query_embedding = None
133
- if query_type == "Text":
134
- text_query = st.text_input("Enter search text:")
135
- if text_query:
136
- tokens = clip.tokenize([text_query]).to(st.session_state.device)
137
- with torch.no_grad():
138
- query_embedding = st.session_state.model.encode_text(tokens).cpu().numpy().flatten()
139
-
140
- elif query_type == "Image":
141
- query_file = st.file_uploader("Upload query image", type=["jpg", "jpeg", "png"], key="query_image")
142
- if query_file:
143
- query_img = Image.open(query_file).convert("RGB")
144
- st.image(query_img, caption="Query Image", width=200)
145
- query_tensor = st.session_state.preprocess(query_img).unsqueeze(0).to(st.session_state.device)
146
- with torch.no_grad():
147
- query_embedding = st.session_state.model.encode_image(query_tensor).cpu().numpy().flatten()
148
-
149
- # ----- Perform Search -----
150
- if query_embedding is not None:
151
- if st.session_state.dataset_name == "demo":
152
- collection = st.session_state.demo_collection
153
- images = st.session_state.demo_images
154
- else:
155
- collection = st.session_state.user_collection
156
- images = st.session_state.user_images
157
-
158
- if collection.count() > 0:
159
- results = collection.query(
160
- query_embeddings=[query_embedding],
161
- n_results=min(5, collection.count())
162
- )
163
- ids = results["ids"][0]
164
- distances = results["distances"][0]
165
- similarities = [1 - d for d in distances]
166
-
167
- st.subheader("πŸ”— Top Matches")
168
- cols = st.columns(len(ids))
169
- for i, (img_id, sim) in enumerate(zip(ids, similarities)):
170
- with cols[i]:
171
- st.image(images[int(img_id)], caption=f"Sim: {sim:.3f}", width=150)
172
- else:
173
- st.warning("No indexed images to search.")
174
- else:
175
- st.info("πŸ‘† Please select a dataset (Demo or Upload Images) to begin.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  import time
11
 
12
  # ----- Setup -----
13
+ st.set_page_config(page_title="CLIP Image Search", layout="wide")
14
  CACHE_DIR = tempfile.gettempdir()
15
  CHROMA_PATH = os.path.join(CACHE_DIR, "chroma_db")
16
  DEMO_DIR = os.path.join(CACHE_DIR, "demo_images")
17
  os.makedirs(DEMO_DIR, exist_ok=True)
18
 
19
+ # ----- Session State Init -----
20
  if 'dataset_loaded' not in st.session_state:
21
  st.session_state.dataset_loaded = False
22
  if 'dataset_name' not in st.session_state:
 
44
  name="user_images", metadata={"hnsw:space": "cosine"}
45
  )
46
 
47
+ # ----- Sidebar -----
48
+ with st.sidebar:
49
+ st.title("🧠 CLIP Search App")
50
+ st.markdown("Choose a dataset to begin:")
51
+ if st.button("πŸ“¦ Load Demo Images"):
52
+ st.session_state.dataset_name = "demo"
53
+ st.session_state.dataset_loaded = False
54
+ if st.button("πŸ“€ Upload Your Images"):
55
+ st.session_state.dataset_name = "user"
56
+ st.session_state.dataset_loaded = False
57
+
58
+ # ----- Helper -----
 
 
59
  def download_image_with_retry(url, path, retries=3, delay=1.0):
60
  for attempt in range(retries):
61
  try:
 
64
  with open(path, 'wb') as f:
65
  f.write(r.content)
66
  return True
67
+ except Exception:
68
  time.sleep(delay)
69
  return False
70
 
71
+ # ----- Main App -----
72
+ left, right = st.columns([2, 1])
73
+
74
+ with left:
75
+ st.title("πŸ” CLIP-Based Image Search")
76
+
77
+ # ----- Load Demo -----
78
+ if st.session_state.dataset_name == "demo" and not st.session_state.dataset_loaded:
79
+ with st.spinner("Downloading and indexing demo images..."):
80
+ st.session_state.demo_collection.delete(ids=[str(i) for i in range(50)])
81
+ demo_image_paths, demo_images = [], []
82
+ for i in range(50):
83
+ path = os.path.join(DEMO_DIR, f"img_{i+1:02}.jpg")
84
+ if not os.path.exists(path):
85
+ url = f"https://picsum.photos/seed/{i}/1024/768"
86
+ download_image_with_retry(url, path)
87
+ try:
88
+ demo_images.append(Image.open(path).convert("RGB"))
89
+ demo_image_paths.append(path)
90
+ except:
91
+ continue
92
+ embeddings, ids, metadatas = [], [], []
93
+ for i, img in enumerate(demo_images):
94
+ img_tensor = st.session_state.preprocess(img).unsqueeze(0).to(st.session_state.device)
95
+ with torch.no_grad():
96
+ embedding = st.session_state.model.encode_image(img_tensor).cpu().numpy().flatten()
97
+ embeddings.append(embedding)
98
+ ids.append(str(i))
99
+ metadatas.append({"path": demo_image_paths[i]})
100
+ st.session_state.demo_collection.add(embeddings=embeddings, ids=ids, metadatas=metadatas)
101
+ st.session_state.demo_images = demo_images
102
+ st.session_state.dataset_loaded = True
103
+ st.success("βœ… Demo images loaded!")
104
+
105
+ # ----- Upload User Images -----
106
+ if st.session_state.dataset_name == "user" and not st.session_state.dataset_loaded:
107
+ uploaded = st.file_uploader("Upload your images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
108
+ if uploaded:
109
+ st.session_state.user_collection.delete(ids=[
110
+ str(i) for i in range(st.session_state.user_collection.count())
111
+ ])
112
+ user_images = []
113
+ for i, file in enumerate(uploaded):
114
+ try:
115
+ img = Image.open(file).convert("RGB")
116
+ except:
117
+ continue
118
+ user_images.append(img)
119
+ img_tensor = st.session_state.preprocess(img).unsqueeze(0).to(st.session_state.device)
120
+ with torch.no_grad():
121
+ embedding = st.session_state.model.encode_image(img_tensor).cpu().numpy().flatten()
122
+ st.session_state.user_collection.add(
123
+ embeddings=[embedding], ids=[str(i)], metadatas=[{"index": i}]
124
+ )
125
+ st.session_state.user_images = user_images
126
+ st.session_state.dataset_loaded = True
127
+ st.success(f"βœ… Uploaded {len(user_images)} images.")
128
+
129
+ # ----- Search Section -----
130
+ if st.session_state.dataset_loaded:
131
+ st.subheader("πŸ”Ž Search")
132
+ query_type = st.radio("Search by:", ("Text", "Image"))
133
+
134
+ query_embedding = None
135
+ if query_type == "Text":
136
+ text_query = st.text_input("Enter your search prompt:")
137
+ if text_query:
138
+ tokens = clip.tokenize([text_query]).to(st.session_state.device)
139
+ with torch.no_grad():
140
+ query_embedding = st.session_state.model.encode_text(tokens).cpu().numpy().flatten()
141
+ elif query_type == "Image":
142
+ query_file = st.file_uploader("Upload query image", type=["jpg", "jpeg", "png"], key="query_image")
143
+ if query_file:
144
+ query_img = Image.open(query_file).convert("RGB")
145
+ st.image(query_img, caption="Query Image", width=200)
146
+ query_tensor = st.session_state.preprocess(query_img).unsqueeze(0).to(st.session_state.device)
147
+ with torch.no_grad():
148
+ query_embedding = st.session_state.model.encode_image(query_tensor).cpu().numpy().flatten()
149
+
150
+ # ----- Perform Search -----
151
+ if query_embedding is not None:
152
+ if st.session_state.dataset_name == "demo":
153
+ collection = st.session_state.demo_collection
154
+ images = st.session_state.demo_images
155
+ else:
156
+ collection = st.session_state.user_collection
157
+ images = st.session_state.user_images
158
+
159
+ if collection.count() > 0:
160
+ results = collection.query(
161
+ query_embeddings=[query_embedding],
162
+ n_results=min(5, collection.count())
163
+ )
164
+ ids = results["ids"][0]
165
+ distances = results["distances"][0]
166
+ similarities = [1 - d for d in distances]
167
+
168
+ st.subheader("🎯 Top Matches")
169
+ cols = st.columns(len(ids))
170
+ for i, (img_id, sim) in enumerate(zip(ids, similarities)):
171
+ with cols[i]:
172
+ st.image(images[int(img_id)], caption=f"Similarity: {sim:.3f}", use_column_width=True)
173
+ else:
174
+ st.warning("⚠️ No images available for search.")
175
+ else:
176
+ st.info("πŸ‘ˆ Choose a dataset from the sidebar to get started.")
177
+
178
+ # ----- Right Panel: Show Current Dataset Images -----
179
+ with right:
180
+ st.subheader("πŸ–ΌοΈ Dataset Preview")
181
+ image_list = st.session_state.demo_images if st.session_state.dataset_name == "demo" else st.session_state.user_images
182
+ if st.session_state.dataset_loaded and image_list:
183
+ st.caption(f"Showing {len(image_list)} images")
184
+ for i, img in enumerate(image_list[:20]):
185
+ st.image(img, use_column_width=True)
186
+ else:
187
+ st.markdown("No images to preview yet.")