ans123 committed
Commit 2680fbd · verified · 1 Parent(s): 14871de

Update app.py

Files changed (1)
  1. app.py +256 -265
app.py CHANGED
@@ -1,334 +1,325 @@
  import os
- import zipfile
  import torch
  import clip
  import numpy as np
  from PIL import Image
  import gradio as gr
  import openai
  from tqdm import tqdm
- from glob import glob
- import chromadb
- from chromadb.utils import embedding_functions
- import json
- import time
- from dotenv import load_dotenv
-
- # Load environment variables from .env file
- load_dotenv()

  # ─────────────────────────────────────────────
- # 📂 STEP 1: UNZIP TO CORRECT STRUCTURE
  # ─────────────────────────────────────────────
- zip_name = "lfw-faces.zip"
- unzip_dir = "lfw-faces"
-
- if not os.path.exists(unzip_dir):
-     print("🔓 Unzipping...")
-     with zipfile.ZipFile(zip_name, "r") as zip_ref:
-         zip_ref.extractall(unzip_dir)
-     print("✅ Unzipped into:", unzip_dir)
-
- # True image root after unzip
- img_root = os.path.join(unzip_dir, "lfw-deepfunneled")

  # ─────────────────────────────────────────────
- # 🧠 STEP 2: LOAD CLIP MODEL
  # ─────────────────────────────────────────────
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model, preprocess = clip.load("ViT-B/32", device=device)
- print(f"✅ CLIP model loaded on {device}")

  # ─────────────────────────────────────────────
- # 🗄️ STEP 3: CHROMA DB SETUP & EMBEDDING FUNCTION
  # ─────────────────────────────────────────────
- class ClipEmbeddingFunction:
-     """Custom embedding function for Chroma DB using CLIP"""

-     def __init__(self, model, preprocess, device):
-         self.model = model
-         self.preprocess = preprocess
-         self.device = device

-     def __call__(self, images):
-         """Generate embeddings for a list of image paths"""
-         embeddings = []

-         for image_path in images:
-             try:
-                 # Check if the path is a string (for new additions from disk)
-                 if isinstance(image_path, str) and os.path.exists(image_path):
-                     img = Image.open(image_path).convert("RGB")
-                 else:
-                     # For query images that are already PIL images
-                     img = image_path.convert("RGB") if hasattr(image_path, 'convert') else image_path

-                 img_input = self.preprocess(img).unsqueeze(0).to(self.device)
                  with torch.no_grad():
-                     emb = self.model.encode_image(img_input).cpu().numpy().flatten()
                      emb /= np.linalg.norm(emb)
-                 embeddings.append(emb.tolist())
              except Exception as e:
-                 print(f"⚠️ Error embedding image: {e}")
-                 # Return a zero vector as fallback
-                 embeddings.append([0] * 512)

-         return embeddings

- def setup_database():
-     """Setup ChromaDB with CLIP embedding function"""
-     try:
-         # Create persistent client
-         client = chromadb.PersistentClient(path="./chroma_db")
-
-         # Create custom embedding function
-         embedding_function = ClipEmbeddingFunction(model, preprocess, device)
-
-         # Create or get existing collection
-         collection = client.get_or_create_collection(
-             name="faces",
-             embedding_function=embedding_function,
-             metadata={"hnsw:space": "cosine"}  # Use cosine similarity
-         )

-         print("✅ ChromaDB setup complete.")
-         return client, collection
-     except Exception as e:
-         print(f"❌ Database setup failed: {e}")
-         return None, None

- def populate_database(collection, limit=500):
-     """Populate ChromaDB with images and their embeddings"""
-     # Collect all .jpg files inside subfolders
-     all_images = sorted(glob(os.path.join(img_root, "*", "*.jpg")))
-     selected_images = all_images[:limit]
-
-     if len(selected_images) == 0:
-         raise RuntimeError("❌ No image files found in unzipped structure!")

-     # Get existing IDs
-     existing_ids = set()
      try:
-         existing_count = collection.count()
-         if existing_count > 0:
-             results = collection.get(limit=existing_count)
-             existing_ids = set(results['ids'])
      except Exception as e:
-         print(f"Error getting existing IDs: {e}")
-
-     # Filter out images that are already in the database
-     new_images = []
-     new_ids = []
-     new_metadatas = []

-     for fpath in selected_images:
-         # Create ID from path
-         image_id = fpath.replace('/', '_')
-         if image_id not in existing_ids:
-             new_images.append(fpath)
-             new_ids.append(image_id)
-             name = os.path.splitext(os.path.basename(fpath))[0].replace("_", " ")
-             new_metadatas.append({
-                 "path": fpath,
-                 "name": name
-             })

-     if not new_images:
-         print("✅ All images are already in the database.")
-         return

-     print(f"🧠 Adding {len(new_images)} new images to the database...")
-
-     # Process images in batches to avoid memory issues
-     batch_size = 50
-     for i in range(0, len(new_images), batch_size):
-         batch_imgs = new_images[i:i+batch_size]
-         batch_ids = new_ids[i:i+batch_size]
-         batch_metadatas = new_metadatas[i:i+batch_size]
-
-         print(f"Processing batch {i//batch_size + 1}/{(len(new_images)-1)//batch_size + 1}...")

          try:
-             collection.add(
-                 documents=batch_imgs,  # ChromaDB will call our embedding function on these
-                 ids=batch_ids,
-                 metadatas=batch_metadatas
-             )
          except Exception as e:
-             print(f"⚠️ Error adding batch to database: {e}")

-     # Count total faces in database
-     total_faces = collection.count()
-     print(f"✅ Database now contains {total_faces} faces.")
-
- # ─────────────────────────────────────────────
- # 🔐 STEP 4: LOAD OPENAI API KEY
- # ─────────────────────────────────────────────
- openai.api_key = os.getenv("OPENAI_API_KEY")
- if not openai.api_key:
-     print("⚠️ OpenAI API key not found. GPT-4 analysis will not work.")

  # ─────────────────────────────────────────────
- # 🔍 STEP 5: FACE MATCHING FUNCTION
  # ─────────────────────────────────────────────
- def scan_face(user_image, collection):
-     """Scan a face image and find matches in the database"""
-     if user_image is None:
-         return [], "", "", "Please upload a face image."
-
      try:
-         # Query database for similar faces using the image directly
-         results = collection.query(
-             query_embeddings=None,  # Will be generated by our embedding function
-             query_images=[user_image],  # Pass the PIL image directly
-             n_results=5,
-             include=["metadatas", "distances"]
-         )

-         metadatas = results.get("metadatas", [[]])[0]
-         distances = results.get("distances", [[]])[0]
-
-         gallery, captions, names = [], [], []
-         scores = []

-         for i, metadata in enumerate(metadatas):
-             try:
-                 path = metadata["path"]
-                 name = metadata["name"]
-
-                 # Convert distance to similarity score (1 - normalized_distance)
-                 # ChromaDB uses cosine distance, so 0 is most similar, 2 is most different
-                 distance = distances[i]
-                 similarity = 1 - (distance / 2)  # Convert to 0-1 scale
-                 scores.append(similarity)
-
-                 img = Image.open(path)
-                 gallery.append(img)
-                 captions.append(f"{name} (Score: {similarity:.2f})")
-                 names.append(name)
-             except Exception as e:
-                 captions.append(f"⚠️ Error loading match image: {e}")

-         risk_score = min(100, int(np.mean(scores) * 100)) if scores else 0
-
-         # 🧠 GPT-4 EXPLANATION
-         explanation = ""
-         if openai.api_key and names:
-             try:
-                 prompt = (
-                     f"The uploaded face matches closely with: {', '.join(names)}. "
-                     f"Based on this, should the user be suspicious? Analyze like a funny but smart AI dating detective."
-                 )
-                 response = openai.chat.completions.create(
-                     model="gpt-4",
-                     messages=[
-                         {"role": "system", "content": "You're a playful but intelligent AI face-matching analyst."},
-                         {"role": "user", "content": prompt}
-                     ]
-                 )
-                 explanation = response.choices[0].message.content
-             except Exception as e:
-                 explanation = f"(OpenAI error): {e}"
-         else:
-             explanation = "OpenAI API key not set or no matches found."
-
-         return gallery, "\n".join(captions), f"{risk_score}/100", explanation
-
      except Exception as e:
-         return [], "", "", f"Error scanning face: {e}"

  # ─────────────────────────────────────────────
- # 🌱 STEP 6: ADD NEW FACE FUNCTION
  # ─────────────────────────────────────────────
- def add_new_face(image, name, collection):
-     """Add a new face to the database"""
-     if image is None or not name:
-         return "Please provide both an image and a name."

-     try:
-         # Save image to a temporary file
-         timestamp = int(time.time())
-         os.makedirs("uploaded_faces", exist_ok=True)
-         path = f"uploaded_faces/{name.replace(' ', '_')}_{timestamp}.jpg"
-         image.save(path)

-         # Add to ChromaDB
-         image_id = path.replace('/', '_')
-         collection.add(
-             documents=[path],
-             ids=[image_id],
-             metadatas=[{
-                 "path": path,
-                 "name": name
-             }]
          )

-         return f"✅ Added {name} to the database successfully!"
-     except Exception as e:
-         return f"❌ Failed to add face: {e}"

  # ─────────────────────────────────────────────
- # 🎛️ STEP 7: GRADIO UI
  # ─────────────────────────────────────────────
  def create_ui():
-     """Create Gradio UI with both scan and add functionality"""
-     # Setup database
-     client, collection = setup_database()
-     if collection is None:
-         raise RuntimeError("❌ Database setup failed.")
-
-     # Populate database with initial images
-     populate_database(collection)
-
-     # Wrapper functions for Gradio that use the database collection
-     def scan_face_wrapper(image):
-         return scan_face(image, collection)
-
-     def add_face_wrapper(image, name):
-         return add_new_face(image, name, collection)

-     with gr.Blocks(title="Tinder Scanner – Real Face Match Detector") as demo:
-         gr.Markdown("# Tinder Scanner – Real Face Match Detector")
-         gr.Markdown("Scan a face image to find visual matches using CLIP and ChromaDB, and get a cheeky GPT-4 analysis.")

-         with gr.Tab("Scan Face"):
-             with gr.Row():
-                 with gr.Column():
-                     input_image = gr.Image(type="pil", label="Upload a Face Image")
-                     scan_button = gr.Button("🔍 Scan Face")

-                 with gr.Column():
-                     gallery = gr.Gallery(label="🔍 Top Matches", columns=[5], height="auto")
-                     captions = gr.Textbox(label="Match Names + Similarity Scores")
-                     risk_score = gr.Textbox(label="🚨 Cheating Risk Score")
-                     explanation = gr.Textbox(label="🧠 GPT-4 Explanation", lines=5)

-             scan_button.click(
-                 fn=scan_face_wrapper,
-                 inputs=[input_image],
-                 outputs=[gallery, captions, risk_score, explanation]
-             )
-
-         with gr.Tab("Add New Face"):
-             with gr.Row():
-                 with gr.Column():
-                     new_image = gr.Image(type="pil", label="Upload New Face Image")
-                     new_name = gr.Textbox(label="Person's Name")
-                     add_button = gr.Button("➕ Add to Database")

-                 with gr.Column():
-                     result = gr.Textbox(label="Result")
-
-             add_button.click(
-                 fn=add_face_wrapper,
-                 inputs=[new_image, new_name],
-                 outputs=result
-             )

      return demo

  # ─────────────────────────────────────────────
- # 🚀 MAIN EXECUTION
  # ─────────────────────────────────────────────
  if __name__ == "__main__":
      demo = create_ui()
  import os
+ import json
  import torch
  import clip
+ import faiss
  import numpy as np
  from PIL import Image
  import gradio as gr
  import openai
+ import requests
  from tqdm import tqdm
+ from io import BytesIO

  # ─────────────────────────────────────────────
+ # 🧠 STEP 1: LOAD CLIP MODEL
  # ─────────────────────────────────────────────
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model, preprocess = clip.load("ViT-B/32", device=device)

  # ─────────────────────────────────────────────
+ # 📦 STEP 2: LOAD PROFILE DATA FROM JSON
  # ─────────────────────────────────────────────
+ def load_profile_data(json_file_path=None, json_data=None):
+     """Load profile data either from a file or directly from JSON data"""
+     if json_file_path and os.path.exists(json_file_path):
+         with open(json_file_path, 'r') as f:
+             profiles = json.load(f)
+     elif json_data:
+         profiles = json_data
+     else:
+         # Sample data structure as fallback
+         profiles = [
+             {
+                 "Id": "sample-id",
+                 "Name": "Sample Profile",
+                 "Age": 25,
+                 "Bio": "Sample bio",
+                 "Photos": [
+                     "https://example.com/sample.jpg"
+                 ]
+             }
+         ]
+
+     return profiles

  # ─────────────────────────────────────────────
+ # 🖼️ STEP 3: DOWNLOAD AND PROCESS IMAGES
  # ─────────────────────────────────────────────
+ def download_and_process_image(url):
+     """Download image from URL and return PIL Image"""
+     try:
+         response = requests.get(url, timeout=10)
+         response.raise_for_status()
+         img = Image.open(BytesIO(response.content)).convert("RGB")
+         return img
+     except Exception as e:
+         print(f"⚠️ Error downloading image from {url}: {e}")
+         return None
+
+ def generate_embeddings(profiles, max_images=500):
+     """Generate CLIP embeddings for profile images"""
+     embeddings = []
+     image_urls = []
+     profile_info = []  # Store name, age, etc. for each image

+     image_count = 0

+     print(f"🧠 Generating CLIP embeddings for profile images...")
+     for profile in tqdm(profiles, desc="Processing profiles"):
+         name = profile.get("Name", "Unknown")
+         age = profile.get("Age", "?")

+         for photo_url in profile.get("Photos", []):
+             if image_count >= max_images:
+                 break

+             try:
+                 img = download_and_process_image(photo_url)
+                 if img is None:
+                     continue
+
+                 img_input = preprocess(img).unsqueeze(0).to(device)
                  with torch.no_grad():
+                     emb = model.encode_image(img_input).cpu().numpy().flatten()
                      emb /= np.linalg.norm(emb)
+
+                 embeddings.append(emb)
+                 image_urls.append(photo_url)
+                 profile_info.append({
+                     "Name": name,
+                     "Age": age,
+                     "Id": profile.get("Id", "Unknown"),
+                     "Bio": profile.get("Bio", "")
+                 })
+
+                 image_count += 1
              except Exception as e:
+                 print(f"⚠️ Error with {photo_url}: {e}")

+         if image_count >= max_images:
+             break
+
+     if embeddings:
+         embeddings = np.vstack(embeddings).astype("float32")
+     else:
+         embeddings = np.array([]).astype("float32")
+
+     print(f"✅ Finished embedding {len(embeddings)} images.")
+     return embeddings, image_urls, profile_info

+ # ─────────────────────────────────────────────
+ # ⚡ STEP 4: BUILD FAISS INDEX
+ # ─────────────────────────────────────────────
+ def build_faiss_index(embeddings):
+     """Build FAISS index from embeddings"""
+     if len(embeddings) == 0:
+         return None

+     dimension = embeddings.shape[1]
+     index = faiss.IndexFlatIP(dimension)
+     index.add(embeddings)
+     return index

+ # ─────────────────────────────────────────────
+ # 🔐 STEP 5: LOAD OPENAI API KEY
+ # ─────────────────────────────────────────────
+ def init_openai():
+     openai.api_key = os.getenv("OPENAI_API_KEY")
+     if not openai.api_key:
+         print("⚠️ Warning: OPENAI_API_KEY not found. GPT-4 analysis will not be available.")
+
+ # ─────────────────────────────────────────────
+ # 🔎 STEP 6: SEARCH FUNCTIONALITY
+ # ─────────────────────────────────────────────
+ def search_similar_faces(user_image, index, image_urls, profile_info, top_k=5):
+     """Search for similar faces using CLIP + FAISS"""
+     if index is None:
+         print("⚠️ No index available. Please load profile data first.")
+         return [], [], 0

      try:
+         user_image = user_image.convert("RGB")
+         tensor = preprocess(user_image).unsqueeze(0).to(device)
+         with torch.no_grad():
+             query_emb = model.encode_image(tensor).cpu().numpy().astype("float32")
+         query_emb /= np.linalg.norm(query_emb)
      except Exception as e:
+         print(f"⚠️ Image preprocessing failed: {e}")
+         return [], [], 0

+     scores, indices = index.search(query_emb, top_k)
+     scores, indices = scores.flatten(), indices.flatten()

+     matching_images = []
+     match_details = []

+     for i in range(len(indices)):
+         idx = indices[i]
+         score = scores[i]

          try:
+             url = image_urls[idx]
+             info = profile_info[idx]
+
+             img = download_and_process_image(url)
+             if img:
+                 matching_images.append(img)
+                 match_details.append({
+                     "url": url,
+                     "score": score,
+                     "info": info
+                 })
          except Exception as e:
+             print(f"⚠️ Error processing match at index {idx}: {e}")

+     risk_score = min(100, int(np.mean(scores) * 100)) if scores.size > 0 else 0
+
+     return matching_images, match_details, risk_score
  # ─────────────────────────────────────────────
+ # 🧠 STEP 7: GPT-4 ANALYSIS
  # ─────────────────────────────────────────────
+ def generate_gpt4_analysis(match_details):
+     """Generate fun analysis using GPT-4"""
+     if not openai.api_key:
+         return "GPT-4 analysis not available (API key not configured)"
+
+     if not match_details:
+         return "No matches found for analysis"
+
      try:
+         names = [f"{d['info']['Name']} ({d['info']['Age']})" for d in match_details]

+         prompt = (
+             f"The uploaded face matches closely with: {', '.join(names)}. "
+             f"Based on this, should the user be suspicious? "
+             f"Analyze like a funny but smart AI dating detective. Keep it concise."
+         )

+         response = openai.chat.completions.create(
+             model="gpt-4",
+             messages=[
+                 {"role": "system", "content": "You're a playful but intelligent AI face-matching analyst."},
+                 {"role": "user", "content": prompt}
+             ]
+         )

+         return response.choices[0].message.content
      except Exception as e:
+         return f"(OpenAI error): {e}"

  # ─────────────────────────────────────────────
+ # 🎛️ STEP 8: APPLICATION CLASS
  # ─────────────────────────────────────────────
+ class TinderScanner:
+     def __init__(self):
+         self.index = None
+         self.image_urls = []
+         self.profile_info = []
+         self.profiles = []
+
+         # Initialize OpenAI
+         init_openai()

+     def load_data(self, json_text=None, json_file=None):
+         """Load profile data and build index"""
+         try:
+             if json_text:
+                 json_data = json.loads(json_text)
+                 self.profiles = load_profile_data(json_data=json_data)
+             elif json_file:
+                 self.profiles = load_profile_data(json_file_path=json_file)
+             else:
+                 return "Please provide either JSON text or a JSON file"
+
+             embeddings, self.image_urls, self.profile_info = generate_embeddings(self.profiles)
+
+             if len(embeddings) > 0:
+                 self.index = build_faiss_index(embeddings)
+                 return f"✅ Successfully loaded {len(self.profiles)} profiles with {len(self.image_urls)} photos"
+             else:
+                 return "⚠️ No valid images found in the provided data"
+         except Exception as e:
+             return f"❌ Error loading data: {e}"
+
+     def scan_face(self, user_image, json_input=None):
+         """Process a user image and find matches"""
+         # Load data if provided and not already loaded
+         if json_input and not self.index:
+             load_result = self.load_data(json_text=json_input)
+             if "Successfully" not in load_result:
+                 return [], "", "", load_result
+
+         if not self.index:
+             return [], "", "", "Please load profile data first by providing JSON input"

+         if user_image is None:
+             return [], "", "", "Please upload a face image"
+
+         images, match_details, risk_score = search_similar_faces(
+             user_image, self.index, self.image_urls, self.profile_info
          )

+         # Format match captions
+         captions = []
+         for detail in match_details:
+             info = detail["info"]
+             captions.append(f"{info['Name']} ({info['Age']}) - Score: {detail['score']:.2f}")
+
+         # Generate GPT-4 analysis
+         explanation = generate_gpt4_analysis(match_details)
+
+         return images, "\n".join(captions), f"{risk_score}/100", explanation

  # ─────────────────────────────────────────────
+ # 🖥️ STEP 9: GRADIO UI
  # ─────────────────────────────────────────────
  def create_ui():
+     scanner = TinderScanner()

+     with gr.Blocks(title="Enhanced Tinder Scanner") as demo:
+         gr.Markdown("# 🔍 Tinder Scanner Pro – Face Match Detector")
+         gr.Markdown("Scan a face image to find visual matches in Tinder profiles and get a cheeky GPT-4 analysis.")

+         with gr.Tabs():
+             with gr.TabItem("Setup Data"):
+                 with gr.Row():
+                     with gr.Column():
+                         json_input = gr.Textbox(
+                             label="JSON Profile Data",
+                             placeholder='Paste JSON data here. Format: [{"Id": "...", "Name": "...", "Age": 25, "Photos": ["url1", "url2"]}]',
+                             lines=10
+                         )
+                         load_btn = gr.Button("Load Profile Data", variant="primary")
+                         data_status = gr.Textbox(label="Status")

+                 load_btn.click(
+                     fn=scanner.load_data,
+                     inputs=[json_input],
+                     outputs=[data_status]
+                 )

+             with gr.TabItem("Scan Face"):
+                 with gr.Row():
+                     with gr.Column():
+                         user_image = gr.Image(type="pil", label="Upload a Face Image")
+                         scan_btn = gr.Button("Scan Face", variant="primary")
+
+                     with gr.Column():
+                         matches_gallery = gr.Gallery(label="🔍 Top Matches", columns=[5], height="auto")
+                         match_details = gr.Textbox(label="Match Details")
+                         risk_score = gr.Textbox(label="🚨 Similarity Score")
+                         gpt_analysis = gr.Textbox(label="🧠 GPT-4 Analysis")

+                 scan_btn.click(
+                     fn=scanner.scan_face,
+                     inputs=[user_image, json_input],
+                     outputs=[matches_gallery, match_details, risk_score, gpt_analysis]
+                 )

      return demo

  # ─────────────────────────────────────────────
+ # 🚀 STEP 10: MAIN EXECUTION
  # ─────────────────────────────────────────────
  if __name__ == "__main__":
      demo = create_ui()
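
A minimal smoke test for the new FAISS-based flow introduced by this commit, useful when reviewing it locally. This is an illustrative sketch, not part of the commit: it assumes the app.py above is importable from the working directory, the profile JSON follows the placeholder format from the "Setup Data" tab, and the photo URL and query image path are placeholders that must point to real, reachable images for the index to build.

# Hypothetical driver script (not part of the commit); importing app loads the
# CLIP model at module level but does not launch the UI, since demo.launch()
# sits behind the __main__ guard.
import json

from PIL import Image

from app import TinderScanner

profiles = [
    {
        "Id": "demo-1",
        "Name": "Demo Profile",
        "Age": 27,
        "Bio": "Sample bio",
        "Photos": ["https://example.com/face.jpg"],  # replace with a real photo URL
    }
]

scanner = TinderScanner()
print(scanner.load_data(json_text=json.dumps(profiles)))  # embeds photos, builds the FAISS index

# scan_face returns (gallery images, caption text, risk score, GPT-4 analysis)
gallery, captions, risk, analysis = scanner.scan_face(Image.open("query_face.jpg"))
print(captions)
print(risk)
print(analysis)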