Spaces:
Runtime error
update app.py with new image selector
Browse files
app.py
CHANGED
@@ -6,6 +6,7 @@ import torch
|
|
6 |
from PIL import Image
|
7 |
import requests
|
8 |
import os
|
|
|
9 |
|
10 |
|
11 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
@@ -20,6 +21,12 @@ blipprocessor = BlipProcessor.from_pretrained(model_id)
|
|
20 |
|
21 |
im_dir = os.path.join(os.getcwd(),'images')
|
22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
def evaluate_caption(image, caption):
|
24 |
# # Pre-process image
|
25 |
# image = processor(images=image, return_tensors="pt").to(device)
|
@@ -51,13 +58,13 @@ def evaluate_caption(image, caption):
|
|
51 |
return blip_caption,winner
|
52 |
# ,gr.Image(type="pil", value="mukherjee_kushin_WIDPICS1.jpg")
|
53 |
|
54 |
-
|
55 |
callback = gr.HuggingFaceDatasetSaver('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', "gradioTest")
|
56 |
with gr.Blocks() as demo:
|
57 |
im_path_str = 'n01677366_12918.JPEG'
|
58 |
im_path = gr.Textbox(label="Image fname",value=im_path_str,interactive=False, visible=False)
|
59 |
# fn=evaluate_caption,
|
60 |
# inputs=["image", "text"]
|
|
|
61 |
|
62 |
with gr.Column():
|
63 |
im = gr.Image(label="Target Image", interactive = False, type="pil",value =os.path.join(im_dir,im_path_str),height=500)
|
@@ -67,6 +74,7 @@ with gr.Blocks() as demo:
|
|
67 |
with gr.Column():
|
68 |
out1 = gr.Textbox(label="Player 2 (Machine) Caption",interactive=False)
|
69 |
out2 = gr.Textbox(label="Winner",interactive=False)
|
|
|
70 |
|
71 |
|
72 |
# live=False,
|
@@ -74,8 +82,9 @@ with gr.Blocks() as demo:
|
|
74 |
callback.setup([caps, out1, out2, im_path], "flagged_data_points")
|
75 |
# callback.flag([image, caption, blip_caption, winner])
|
76 |
submit_btn.click(fn = evaluate_caption,inputs = [im,caps], outputs = [out1, out2],api_name="test").success(lambda *args: callback.flag(args), [caps, out1, out2, im_path], None, preprocess=False)
|
|
|
77 |
# with gr.Row():
|
78 |
# btn = gr.Button("Flag")
|
79 |
# btn.click(lambda *args: callback.flag(args), [im, caps, out1, out2], None, preprocess=False)
|
80 |
|
81 |
-
demo.launch(
|
|
|
6 |
from PIL import Image
|
7 |
import requests
|
8 |
import os
|
9 |
+
import random
|
10 |
|
11 |
|
12 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
21 |
|
22 |
im_dir = os.path.join(os.getcwd(),'images')
|
23 |
|
24 |
def sample_image(im_dir=im_dir):
    """Sample a random image from *im_dir* and return refreshed components.

    Returns:
        tuple: (gr.Image, gr.Textbox) — the target-image component loaded
        with the newly sampled file, and the hidden filename textbox that
        the flagging callback records.

    Raises:
        FileNotFoundError: if *im_dir* contains no image files.
    """
    # Filter to image files so stray directory entries (.DS_Store,
    # subdirectories, README files) can never be handed to gr.Image.
    image_exts = ('.jpeg', '.jpg', '.png', '.gif', '.bmp', '.webp')
    all_ims = [f for f in os.listdir(im_dir) if f.lower().endswith(image_exts)]
    if not all_ims:
        # Fail loudly with a clear message instead of random.choice's
        # opaque IndexError on an empty sequence.
        raise FileNotFoundError(f"No image files found in {im_dir}")
    new_im = random.choice(all_ims)
    return (
        gr.Image(label="Target Image", interactive=False, type="pil",
                 value=os.path.join(im_dir, new_im), height=500),
        gr.Textbox(label="Image fname", value=new_im, interactive=False,
                   visible=False),
    )
|
28 |
+
|
29 |
+
|
30 |
def evaluate_caption(image, caption):
|
31 |
# # Pre-process image
|
32 |
# image = processor(images=image, return_tensors="pt").to(device)
|
|
|
58 |
return blip_caption,winner
|
59 |
# ,gr.Image(type="pil", value="mukherjee_kushin_WIDPICS1.jpg")
|
60 |
|
|
|
61 |
callback = gr.HuggingFaceDatasetSaver('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', "gradioTest")
|
62 |
with gr.Blocks() as demo:
|
63 |
im_path_str = 'n01677366_12918.JPEG'
|
64 |
im_path = gr.Textbox(label="Image fname",value=im_path_str,interactive=False, visible=False)
|
65 |
# fn=evaluate_caption,
|
66 |
# inputs=["image", "text"]
|
67 |
+
|
68 |
|
69 |
with gr.Column():
|
70 |
im = gr.Image(label="Target Image", interactive = False, type="pil",value =os.path.join(im_dir,im_path_str),height=500)
|
|
|
74 |
with gr.Column():
|
75 |
out1 = gr.Textbox(label="Player 2 (Machine) Caption",interactive=False)
|
76 |
out2 = gr.Textbox(label="Winner",interactive=False)
|
77 |
+
reload_btn = gr.Button("Next Image")
|
78 |
|
79 |
|
80 |
# live=False,
|
|
|
82 |
callback.setup([caps, out1, out2, im_path], "flagged_data_points")
|
83 |
# callback.flag([image, caption, blip_caption, winner])
|
84 |
submit_btn.click(fn = evaluate_caption,inputs = [im,caps], outputs = [out1, out2],api_name="test").success(lambda *args: callback.flag(args), [caps, out1, out2, im_path], None, preprocess=False)
|
85 |
+
reload_btn.click(fn = sample_image, inputs=None, outputs = [im,im_path] )
|
86 |
# with gr.Row():
|
87 |
# btn = gr.Button("Flag")
|
88 |
# btn.click(lambda *args: callback.flag(args), [im, caps, out1, out2], None, preprocess=False)
|
89 |
|
90 |
+
demo.launch()
|