Abhilashvj committed on
Commit
173cecf
·
1 Parent(s): 653a73e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +361 -23
app.py CHANGED
@@ -1,3 +1,5 @@
 
 
1
  import argparse
2
  import base64
3
  import io
@@ -7,7 +9,8 @@ import sys
7
  import traceback
8
  import uuid
9
  from typing import List, Optional
10
-
 
11
  import cv2
12
  import numpy as np
13
  import pandas as pd
@@ -18,21 +21,30 @@ import torch
18
  import uvicorn
19
  from dotenv import load_dotenv
20
  from fastapi import FastAPI, File, Form, HTTPException, UploadFile
21
- from PIL import Image, ImageEnhance
22
  from pydantic import BaseModel
23
  from sentence_transformers import SentenceTransformer, util
24
-
 
 
 
 
 
 
25
  load_dotenv()
26
  pinecone.init(api_key=os.getenv("PINECONE_KEY"), environment=os.getenv("PINECONE_ENV"))
27
-
 
 
 
 
 
28
  IMAGE_SIMILARITY_DEMO = "/find-similar-image/"
29
  IMAGE_SIMILARITY_PINECONE_DEMO = "/find-similar-image-pinecone/"
30
  INDEX_NAME = "imagesearch-demo"
31
  INDEX_DIMENSION = 512
32
  TMP_DIR = "tmp"
33
 
34
- image_sim_model = SentenceTransformer("clip-ViT-B-32")
35
-
36
  def enhance_image(pil_image):
37
  # Convert PIL Image to OpenCV format
38
  open_cv_image = np.array(pil_image)
@@ -74,35 +86,85 @@ def enhance_image(pil_image):
74
  return enhanced_pil_image
75
 
76
 
77
- print("checking pinecone Index")
78
  if INDEX_NAME not in pinecone.list_indexes():
79
- # delete the current index and create the new index if it does not exist
80
- for delete_index in pinecone.list_indexes():
81
- print(f"Deleting exitsing pinecone Index : {delete_index}")
82
-
83
- pinecone.delete_index(delete_index)
84
- print(f"Creating new pinecone Index : {INDEX_NAME}")
85
- pinecone.create_index(INDEX_NAME, dimension=INDEX_DIMENSION, metric="cosine")
86
 
87
  print("Connecting to Pinecone Index")
88
  index = pinecone.Index(INDEX_NAME)
89
 
 
90
 
91
- device = "cuda" if torch.cuda.is_available() else "cpu"
 
 
 
92
 
93
- os.makedirs(TMP_DIR, exist_ok=True)
 
 
94
 
 
 
 
 
95
 
96
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
97
 
98
 
99
  os.makedirs(TMP_DIR, exist_ok=True)
100
 
101
-
102
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
 
104
  app = FastAPI(title="CV Demos")
105
 
 
 
 
 
 
 
106
 
107
  # define response
108
  @app.get("/")
@@ -110,6 +172,280 @@ def root_route():
110
  return {"error": f"Use GET {IMAGE_SIMILARITY_PINECONE_DEMO} instead of the root route!"}
111
 
112
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
113
  @app.post(IMAGE_SIMILARITY_DEMO)
114
  async def image_search_local(
115
  images_to_search: List[UploadFile], query_image: UploadFile = File(...), top_k: int = 5
@@ -204,7 +540,8 @@ async def image_search_pinecone(
204
  query_image = Image.open(io.BytesIO(contents))
205
  print("Indexing query image...")
206
  query_image = enhance_image(query_image)
207
- prompt_embedding = image_sim_model.encode(query_image, convert_to_tensor=True).tolist()
 
208
  if INDEX_NAME not in pinecone.list_indexes():
209
  return {"similar_images": [], "status": "No index found for images"}
210
 
@@ -288,9 +625,10 @@ async def image_search_pinecone(
288
  ids.append(str(uuid.uuid1()).replace("-",""))
289
 
290
  print("Encoding images to vectors...")
291
- corpus_embeddings = image_sim_model.encode(
292
- search_images, convert_to_tensor=True, show_progress_bar=True
293
- ).tolist()
 
294
  print(f"Indexing images to pinecone Index : {INDEX_NAME}")
295
  index.upsert(
296
  vectors=list(zip(ids, corpus_embeddings, meta_datas)), namespace=namespace
@@ -311,4 +649,4 @@ if __name__ == "__main__":
311
  parser.add_argument("--port", default=8000, type=int, help="port number")
312
  # parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
313
  opt = parser.parse_args()
314
- uvicorn.run(app, port=opt.port)
 
1
+ import cv2
2
+ import numpy as np
3
  import argparse
4
  import base64
5
  import io
 
9
  import traceback
10
  import uuid
11
  from typing import List, Optional
12
+ from PIL import ImageEnhance
13
+ import traceback
14
  import cv2
15
  import numpy as np
16
  import pandas as pd
 
21
  import uvicorn
22
  from dotenv import load_dotenv
23
  from fastapi import FastAPI, File, Form, HTTPException, UploadFile
24
+ from PIL import Image
25
  from pydantic import BaseModel
26
  from sentence_transformers import SentenceTransformer, util
27
+ from transformers import (
28
+ AutoFeatureExtractor,
29
+ AutoModel,
30
+ DonutProcessor,
31
+ VisionEncoderDecoderModel,
32
+ )
33
+ from fashion_clip.fashion_clip import FashionCLIP
34
  load_dotenv()
35
  pinecone.init(api_key=os.getenv("PINECONE_KEY"), environment=os.getenv("PINECONE_ENV"))
36
+ DETECTION_URL = "/object-detection/"
37
+ CLASSIFICATION_URL = "/object-classification/"
38
+ QUALITY_ASSESSMENT_URL = "/quality-assessment/"
39
+ FACE_URL = "/face-anonymization/"
40
+ LICENCE_URL = "/licenceplate-anonymization/"
41
+ DOCUMENT_QA = "/document-qa/"
42
  IMAGE_SIMILARITY_DEMO = "/find-similar-image/"
43
  IMAGE_SIMILARITY_PINECONE_DEMO = "/find-similar-image-pinecone/"
44
  INDEX_NAME = "imagesearch-demo"
45
  INDEX_DIMENSION = 512
46
  TMP_DIR = "tmp"
47
 
 
 
48
  def enhance_image(pil_image):
49
  # Convert PIL Image to OpenCV format
50
  open_cv_image = np.array(pil_image)
 
86
  return enhanced_pil_image
87
 
88
 
 
89
if INDEX_NAME not in pinecone.list_indexes():
    # Create the index on first run. Use the shared INDEX_DIMENSION constant
    # (512, matching the CLIP/FashionCLIP embedding size) instead of a
    # hard-coded literal, so the index dimension cannot silently drift from
    # the embeddings upserted later in this file.
    pinecone.create_index(INDEX_NAME, dimension=INDEX_DIMENSION, metric="cosine")
 
 
 
 
 
 
91
 
92
  print("Connecting to Pinecone Index")
93
  index = pinecone.Index(INDEX_NAME)
94
 
95
+ os.makedirs(TMP_DIR, exist_ok=True)
96
 
97
+ # licence_model = torch.hub.load(
98
+ # "ultralytics/yolov5", "custom", path="Licenseplate_model.pt", device="cpu", force_reload=True
99
+ # )
100
+ # licence_model.cpu()
101
 
102
+ # detector = cv2.dnn.DetectionModel(
103
+ # "res10_300x300_ssd_iter_140000_fp16.caffemodel", "deploy.prototxt"
104
+ # )
105
 
106
+ # processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
107
+ # doc_qa_model = VisionEncoderDecoderModel.from_pretrained(
108
+ # "naver-clova-ix/donut-base-finetuned-docvqa"
109
+ # )
110
 
111
  device = "cuda" if torch.cuda.is_available() else "cpu"
112
+ # doc_qa_model.to(device)
113
 
114
 
115
  os.makedirs(TMP_DIR, exist_ok=True)
116
 
117
+ # model = torch.hub.load(
118
+ # "ultralytics/yolov5", "custom", path="best.pt", device="cpu", force_reload=True
119
+ # )
120
+ # model.cpu()
121
+
122
+ # classes = [
123
+ # "gas-distribution-meter",
124
+ # "gas-distribution-piping",
125
+ # "gas-distribution-regulator",
126
+ # "gas-distribution-valve",
127
+ # ]
128
+
129
+ # class_to_idx = {
130
+ # "gas-distribution-meter": 0,
131
+ # "gas-distribution-piping": 1,
132
+ # "gas-distribution-regulator": 2,
133
+ # "gas-distribution-valve": 3,
134
+ # }
135
+
136
+ # idx_to_classes = {v: k for k, v in class_to_idx.items()}
137
+ # modelname = "resnet50d"
138
+ # model_weights = "best_classifer_model.pt"
139
+ # num_classes = len(classes)
140
+
141
+ # classifier_model = timm.create_model(
142
+ # "resnet50d", pretrained=True, num_classes=num_classes, drop_path_rate=0.05
143
+ # )
144
+ # classifier_model.load_state_dict(
145
+ # torch.load(model_weights, map_location=torch.device("cpu"))["model_state_dict"]
146
+ # )
147
+
148
+ # musiq_metric = pyiqa.create_metric("musiq-koniq", device=torch.device("cpu"))
149
+ # image_sim_model = SentenceTransformer("patrickjohncyh/fashion-clip")
150
+ # from transformers import AutoProcessor, AutoModelForZeroShotImageClassification
151
+
152
+ # processor = AutoProcessor.from_pretrained("patrickjohncyh/fashion-clip")
153
+ # model = AutoModelForZeroShotImageClassification.from_pretrained("patrickjohncyh/fashion-clip")
154
+
155
+ # model_ckpt = "nateraw/vit-base-beans"
156
+ # extractor = AutoFeatureExtractor.from_pretrained(model_ckpt)
157
+ # image_sim_model = AutoModel.from_pretrained(model_ckpt)
158
+ fclip = FashionCLIP('fashion-clip')
159
 
160
  app = FastAPI(title="CV Demos")
161
 
162
class Prediction(BaseModel):
    """Response schema carrying a raw prediction vector for an uploaded file."""

    filename: str  # original name of the uploaded file
    contenttype: str  # MIME type reported for the upload
    prediction: List[float] = []  # model output scores; empty when no prediction
168
 
169
  # define response
170
  @app.get("/")
 
172
  return {"error": f"Use GET {IMAGE_SIMILARITY_PINECONE_DEMO} instead of the root route!"}
173
 
174
 
175
+ # @app.post(
176
+ # DETECTION_URL,
177
+ # )
178
+ # async def predict(file: UploadFile = File(...), quality_check: bool = False):
179
+ # try:
180
+ # extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
181
+ # if not extension:
182
+ # return "Image must be jpg or png format!"
183
+ # # read image contain
184
+ # contents = await file.read()
185
+ # pil_image = Image.open(io.BytesIO(contents))
186
+ # if quality_check:
187
+ # print("RUNNING QUALITY CHECK BEFORE OBJECT DETECTION!!!")
188
+ # tmp_file = f"{TMP_DIR}/tmp.png"
189
+ # pil_image.save(tmp_file)
190
+ # score = musiq_metric(tmp_file)
191
+ # if score < 50:
192
+ # return {
193
+ # "Error": "Image quality is not sufficient enough to be considered for object detection"
194
+ # }
195
+
196
+ # results = model(pil_image, size=640) # reduce size=320 for faster inference
197
+ # return results.pandas().xyxy[0].to_json(orient="records")
198
+ # except:
199
+ # e = sys.exc_info()[1]
200
+ # raise HTTPException(status_code=500, detail=str(e))
201
+
202
+
203
+ # @app.post(CLASSIFICATION_URL)
204
+ # async def classify(file: UploadFile = File(...)):
205
+ # try:
206
+ # extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
207
+ # if not extension:
208
+ # return "Image must be jpg or png format!"
209
+ # # read image contain
210
+ # contents = await file.read()
211
+ # pil_image = Image.open(io.BytesIO(contents))
212
+ # data_mean = (0.485, 0.456, 0.406)
213
+ # data_std = (0.229, 0.224, 0.225)
214
+ # image_size = (224, 224)
215
+ # eval_transforms = timm.data.create_transform(
216
+ # input_size=image_size, mean=data_mean, std=data_std
217
+ # )
218
+ # eval_transforms(pil_image).unsqueeze(dim=0).shape
219
+ # classifier_model.eval()
220
+ # print("RUNNING Image Classification!!!")
221
+ # max_class_idx = np.argmax(
222
+ # classifier_model(eval_transforms(pil_image).unsqueeze(dim=0)).detach().numpy()
223
+ # )
224
+ # predicted_class = idx_to_classes[max_class_idx]
225
+ # print(f"Predicted Class idx: {max_class_idx} with name : {predicted_class}")
226
+ # return {"object": predicted_class}
227
+
228
+ # except:
229
+ # e = sys.exc_info()[1]
230
+ # raise HTTPException(status_code=500, detail=str(e))
231
+
232
+
233
+ # @app.post(QUALITY_ASSESSMENT_URL)
234
+ # async def quality_check(file: UploadFile = File(...)):
235
+ # try:
236
+ # extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
237
+ # if not extension:
238
+ # return "Image must be jpg or png format!"
239
+ # # read image contain
240
+ # contents = await file.read()
241
+ # pil_image = Image.open(io.BytesIO(contents))
242
+ # tmp_file = f"{TMP_DIR}/tmp.png"
243
+ # pil_image.save(tmp_file)
244
+ # score = musiq_metric(tmp_file).detach().numpy().tolist()
245
+ # return {"score": score}
246
+
247
+ # except:
248
+ # e = sys.exc_info()[1]
249
+ # raise HTTPException(status_code=500, detail=str(e))
250
+
251
+
252
+ # def anonymize_simple(image, factor=3.0):
253
+ # # automatically determine the size of the blurring kernel based
254
+ # # on the spatial dimensions of the input image
255
+ # (h, w) = image.shape[:2]
256
+ # kW = int(w / factor)
257
+ # kH = int(h / factor)
258
+ # # ensure the width of the kernel is odd
259
+ # if kW % 2 == 0:
260
+ # kW -= 1
261
+ # # ensure the height of the kernel is odd
262
+ # if kH % 2 == 0:
263
+ # kH -= 1
264
+ # # apply a Gaussian blur to the input image using our computed
265
+ # # kernel size
266
+ # return cv2.GaussianBlur(image, (kW, kH), 0)
267
+
268
+
269
+ # def anonymize_pixelate(image, blocks=3):
270
+ # # divide the input image into NxN blocks
271
+ # (h, w) = image.shape[:2]
272
+ # xSteps = np.linspace(0, w, blocks + 1, dtype="int")
273
+ # ySteps = np.linspace(0, h, blocks + 1, dtype="int")
274
+ # # loop over the blocks in both the x and y direction
275
+ # for i in range(1, len(ySteps)):
276
+ # for j in range(1, len(xSteps)):
277
+ # # compute the starting and ending (x, y)-coordinates
278
+ # # for the current block
279
+ # startX = xSteps[j - 1]
280
+ # startY = ySteps[i - 1]
281
+ # endX = xSteps[j]
282
+ # endY = ySteps[i]
283
+ # # extract the ROI using NumPy array slicing, compute the
284
+ # # mean of the ROI, and then draw a rectangle with the
285
+ # # mean RGB values over the ROI in the original image
286
+ # roi = image[startY:endY, startX:endX]
287
+ # (B, G, R) = [int(x) for x in cv2.mean(roi)[:3]]
288
+ # cv2.rectangle(image, (startX, startY), (endX, endY), (B, G, R), -1)
289
+ # # return the pixelated blurred image
290
+ # return image
291
+
292
+
293
+ # # define response
294
+ # @app.get("/")
295
+ # def root_route():
296
+ # return {"error": f"Use GET {FACE_URL} or {LICENCE_URL} instead of the root route!"}
297
+
298
+
299
+ # @app.post(
300
+ # FACE_URL,
301
+ # )
302
+ # async def face_anonymize(
303
+ # file: UploadFile = File(...), blur_type="simple", quality_check: bool = False
304
+ # ):
305
+ # """
306
+ # https://pyimagesearch.com/2020/04/06/blur-and-anonymize-faces-with-opencv-and-python/
307
+ # """
308
+ # try:
309
+ # extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
310
+ # if not extension:
311
+ # return "Image must be jpg or png format!"
312
+ # # read image contain
313
+ # contents = await file.read()
314
+ # pil_image = Image.open(io.BytesIO(contents)).convert("RGB")
315
+ # detector = cv2.dnn.DetectionModel(
316
+ # "res10_300x300_ssd_iter_140000_fp16.caffemodel", "deploy.prototxt"
317
+ # )
318
+ # open_cv_image = np.array(pil_image)
319
+ # # Convert RGB to BGR
320
+ # open_cv_image = open_cv_image[:, :, ::-1].copy()
321
+ # (h, w) = open_cv_image.shape[:2]
322
+ # # Getting the detections
323
+ # detections = detector.detect(open_cv_image)
324
+ # if len(detections[2]) > 0:
325
+ # for face in detections[2]:
326
+ # (x, y, w, h) = face.astype("int")
327
+ # # extract the face ROI
328
+
329
+ # face = open_cv_image[y : y + h, x : x + w]
330
+ # if blur_type == "simple":
331
+ # face = anonymize_simple(face)
332
+ # else:
333
+ # face = anonymize_pixelate(face)
334
+ # open_cv_image[y : y + h, x : x + w] = face
335
+
336
+ # _, encoded_img = cv2.imencode(".PNG", open_cv_image)
337
+
338
+ # encoded_img = base64.b64encode(encoded_img)
339
+ # return {
340
+ # "filename": file.filename,
341
+ # "dimensions": str(open_cv_image.shape),
342
+ # "encoded_img": encoded_img,
343
+ # }
344
+ # except:
345
+ # e = sys.exc_info()[1]
346
+ # print(traceback.format_exc())
347
+ # raise HTTPException(status_code=500, detail=str(e))
348
+
349
+
350
+ # @app.post(LICENCE_URL)
351
+ # async def licence_anonymize(file: UploadFile = File(...), blur_type="simple"):
352
+ # """https://www.kaggle.com/code/gowrishankarp/license-plate-detection-yolov5-pytesseract/notebook#Visualize"""
353
+ # try:
354
+ # extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
355
+ # if not extension:
356
+ # return "Image must be jpg or png format!"
357
+ # # read image contain
358
+ # contents = await file.read()
359
+ # pil_image = Image.open(io.BytesIO(contents))
360
+ # results = licence_model(pil_image, size=640) # reduce size=320 for faster inference
361
+ # pil_image = pil_image.convert("RGB")
362
+ # open_cv_image = np.array(pil_image)
363
+ # open_cv_image = open_cv_image[:, :, ::-1].copy()
364
+ # df = results.pandas().xyxy[0]
365
+ # for i, row in df.iterrows():
366
+ # xmin = int(row["xmin"])
367
+ # ymin = int(row["ymin"])
368
+ # xmax = int(row["xmax"])
369
+ # ymax = int(row["ymax"])
370
+ # licence = open_cv_image[ymin:ymax, xmin:xmax]
371
+ # if blur_type == "simple":
372
+ # licence = anonymize_simple(licence)
373
+ # else:
374
+ # licence = anonymize_pixelate(licence)
375
+ # open_cv_image[ymin:ymax, xmin:xmax] = licence
376
+
377
+ # _, encoded_img = cv2.imencode(".PNG", open_cv_image)
378
+
379
+ # encoded_img = base64.b64encode(encoded_img)
380
+ # return {
381
+ # "filename": file.filename,
382
+ # "dimensions": str(open_cv_image.shape),
383
+ # "encoded_img": encoded_img,
384
+ # }
385
+
386
+ # except:
387
+ # e = sys.exc_info()[1]
388
+ # raise HTTPException(status_code=500, detail=str(e))
389
+
390
+
391
+ # def process_document(image, question):
392
+ # # prepare encoder inputs
393
+ # pixel_values = processor(image, return_tensors="pt").pixel_values
394
+
395
+ # # prepare decoder inputs
396
+ # task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
397
+ # prompt = task_prompt.replace("{user_input}", question)
398
+ # decoder_input_ids = processor.tokenizer(
399
+ # prompt, add_special_tokens=False, return_tensors="pt"
400
+ # ).input_ids
401
+
402
+ # # generate answer
403
+ # outputs = doc_qa_model.generate(
404
+ # pixel_values.to(device),
405
+ # decoder_input_ids=decoder_input_ids.to(device),
406
+ # max_length=doc_qa_model.decoder.config.max_position_embeddings,
407
+ # early_stopping=True,
408
+ # pad_token_id=processor.tokenizer.pad_token_id,
409
+ # eos_token_id=processor.tokenizer.eos_token_id,
410
+ # use_cache=True,
411
+ # num_beams=1,
412
+ # bad_words_ids=[[processor.tokenizer.unk_token_id]],
413
+ # return_dict_in_generate=True,
414
+ # )
415
+
416
+ # # postprocess
417
+ # sequence = processor.batch_decode(outputs.sequences)[0]
418
+ # sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(
419
+ # processor.tokenizer.pad_token, ""
420
+ # )
421
+ # sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token
422
+
423
+ # return processor.token2json(sequence)
424
+
425
+
426
+ # @app.post(DOCUMENT_QA)
427
+ # async def document_qa(question: str = Form(...), file: UploadFile = File(...)):
428
+
429
+ # try:
430
+ # extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
431
+ # if not extension:
432
+ # return "Image must be jpg or png format!"
433
+ # # read image contain
434
+ # contents = await file.read()
435
+ # pil_image = Image.open(io.BytesIO(contents))
436
+ # # tmp_file = f"{TMP_DIR}/tmp.png"
437
+ # # pil_image.save(tmp_file)
438
+ # # answer_git_large = generate_answer_git(git_processor_large, git_model_large, image, question)
439
+
440
+ # answer = process_document(pil_image, question)["answer"]
441
+
442
+ # return {"answer": answer}
443
+
444
+ # except:
445
+ # e = sys.exc_info()[1]
446
+ # raise HTTPException(status_code=500, detail=str(e))
447
+
448
+
449
  @app.post(IMAGE_SIMILARITY_DEMO)
450
  async def image_search_local(
451
  images_to_search: List[UploadFile], query_image: UploadFile = File(...), top_k: int = 5
 
540
  query_image = Image.open(io.BytesIO(contents))
541
  print("Indexing query image...")
542
  query_image = enhance_image(query_image)
543
+ # prompt_embedding = image_sim_model.encode(query_image, convert_to_tensor=True).tolist()
544
+ prompt_embedding = fclip.encode_images([query_image], batch_size=32)[0]
545
  if INDEX_NAME not in pinecone.list_indexes():
546
  return {"similar_images": [], "status": "No index found for images"}
547
 
 
625
  ids.append(str(uuid.uuid1()).replace("-",""))
626
 
print("Encoding images to vectors...")
# BUG FIX: encode_images returns one 512-d embedding per input image.
# The previous "[0]" kept only the FIRST image's embedding (a bare vector
# of floats), so zip(ids, corpus_embeddings, meta_datas) below paired each
# id with a single scalar float instead of a full vector. Keep the whole
# (num_images, 512) matrix and convert to nested lists for Pinecone upsert.
corpus_embeddings = fclip.encode_images(search_images, batch_size=32).tolist()
632
  print(f"Indexing images to pinecone Index : {INDEX_NAME}")
633
  index.upsert(
634
  vectors=list(zip(ids, corpus_embeddings, meta_datas)), namespace=namespace
 
649
  parser.add_argument("--port", default=8000, type=int, help="port number")
650
  # parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s')
651
  opt = parser.parse_args()
652
+ uvicorn.run(app, port=opt.port)