kushagra124 committed
Commit 1d61991 · 1 Parent(s): 436eff8

adding text box

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -41,7 +41,7 @@ def detect_using_clip(image,prompts=[],threshould=0.4):
 
     return model_detections
 
-def display_images(image,detections,prompt='traffic light'):
+def display_images(image,detections,prompt):
     H,W = image.shape[:2]
     image_copy = image.copy()
     if prompt not in detections.keys():
@@ -60,10 +60,10 @@ def shot(image, labels_text,selected_categoty):
     print("prompts :",prompts,classes)
     print("Image shape ",image.shape )
 
-    detections = detect_using_clip(image,prompts=prompts)
-    print("detections :",detections)
+    model_detections = detect_using_clip(image,prompts=prompts)
+    print("detections :",model_detections)
     print("Ctegory ",selected_categoty)
-    return 0
+    return display_images(image=image,detections=model_detections,prompt=selected_categoty)
 
 iface = gr.Interface(fn=shot,
                     inputs = ["image","text",gr.Dropdown(classes, label="Category Label",info='Select Categories')],
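
For reference, a minimal sketch of the display_images helper that the updated shot function now returns through. The box format (a list of (x, y, w, h) tuples per prompt) and the OpenCV drawing style are assumptions for illustration only, not part of this commit.

import cv2

def display_images(image, detections, prompt):
    # Sketch only: assumes detections maps each prompt string to a list of
    # (x, y, w, h) boxes in pixel coordinates; app.py may store them differently.
    H, W = image.shape[:2]
    image_copy = image.copy()
    if prompt not in detections.keys():
        return image_copy  # nothing detected for the selected category
    for (x, y, w, h) in detections[prompt]:
        cv2.rectangle(image_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return image_copy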