raja5259 committed on
Commit
bc5a689
·
verified ·
1 Parent(s): ce9108b

updated examples of prompts

Browse files
Files changed (1) hide show
  1. app.py +8 -3
app.py CHANGED
@@ -1,3 +1,7 @@
 
 
 
 
1
  import s23_openai_clip
2
  from s23_openai_clip import make_train_valid_dfs
3
  from s23_openai_clip import get_image_embeddings
@@ -51,11 +55,12 @@ model, image_embeddings = get_image_embeddings(valid_df, "best.pt")
51
 
52
 
53
  examples1 = ["dogs on the grass",
54
- "father and kid",
55
  "sunny day",
56
  "ocean",
57
  "a group of people",
58
- "forest"]
 
59
 
60
  def greet(query_text):
61
  print("Going to invoke inference_CLIP")
@@ -66,7 +71,7 @@ gallery = gr.Gallery(
66
  columns=[3], rows=[3], object_fit="contain", height="auto")
67
 
68
  demo = gr.Interface(fn=greet,
69
- inputs=gr.Dropdown(choices=examples1, label="Pre-defined Prompt"),
70
  outputs=gallery,
71
  title="Open AI CLIP")
72
 
 
1
+ # https://huggingface.co/spaces/raja5259/EraV2S23
2
+ # https://github.com/rajayourfriend/EraV2/
3
+ # https://github.com/rajayourfriend/EraV2/tree/main/S23
4
+
5
  import s23_openai_clip
6
  from s23_openai_clip import make_train_valid_dfs
7
  from s23_openai_clip import get_image_embeddings
 
55
 
56
 
57
  examples1 = ["dogs on the grass",
58
+ "parent and kid",
59
  "sunny day",
60
  "ocean",
61
  "a group of people",
62
+ "forest",
63
+ "ocean"]
64
 
65
  def greet(query_text):
66
  print("Going to invoke inference_CLIP")
 
71
  columns=[3], rows=[3], object_fit="contain", height="auto")
72
 
73
  demo = gr.Interface(fn=greet,
74
+ inputs=gr.Dropdown(choices=examples1, label="Search Image by text prompt"),
75
  outputs=gallery,
76
  title="Open AI CLIP")
77