added title and examples for prompt
app.py (CHANGED)
@@ -50,15 +50,22 @@ print("Going to invoke make_train_valid_dfs")
 model, image_embeddings = get_image_embeddings(valid_df, "best.pt")
 
 
+examples1 = ["dogs on the grass",
+             "cat and dog",
+             "sunny day",
+             "raining in forest"]
+
 def greet(query_text):
     print("Going to invoke inference_CLIP")
     return inference_CLIP(query_text)
 
 gallery = gr.Gallery(
-    label="
+    label="CLIP result images", show_label=True, elem_id="gallery",
     columns=[3], rows=[3], object_fit="contain", height="auto")
-
-demo = gr.Interface(fn=greet,
-
+
+demo = gr.Interface(fn=greet,
+                    inputs=gr.Dropdown(choices=examples1, label="Pre-defined Prompt"),
+                    outputs=gallery,
+                    title="Open AI CLIP")
 print("Going to invoke demo.launch")
 demo.launch("debug")
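For context, when gr.Gallery is the output component, the function wrapped by gr.Interface is expected to return a list of images (file paths, URLs, PIL images or numpy arrays). Below is a minimal, self-contained sketch of the wiring this commit introduces; inference_CLIP is stubbed with random images purely so the snippet runs on its own, since the real retrieval code lives earlier in app.py.

import numpy as np
import gradio as gr

# Stub standing in for the real CLIP retrieval defined earlier in app.py.
# It returns a few random images so the Gallery has something to render;
# the real inference_CLIP returns the images retrieved for the query text.
def inference_CLIP(query_text):
    return [np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
            for _ in range(3)]

examples1 = ["dogs on the grass", "cat and dog", "sunny day", "raining in forest"]

def greet(query_text):
    print("Going to invoke inference_CLIP")
    return inference_CLIP(query_text)

# Gallery output: 3x3 grid, images scaled to fit their cells.
gallery = gr.Gallery(label="CLIP result images", show_label=True, elem_id="gallery",
                     columns=[3], rows=[3], object_fit="contain", height="auto")

demo = gr.Interface(fn=greet,
                    inputs=gr.Dropdown(choices=examples1, label="Pre-defined Prompt"),
                    outputs=gallery,
                    title="Open AI CLIP")

if __name__ == "__main__":
    # launch("debug") passes the string as a positional argument rather than
    # enabling debug mode; debug=True is the keyword form used here.
    demo.launch(debug=True)

The gr.Dropdown input hands the selected string straight to greet, so the app only serves the four pre-defined prompts; a gr.Textbox input would be needed to accept free-text queries.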