SkalskiP committed
Commit c3f2745 · 1 Parent(s): 5755b4d

examples added

Files changed (1)
  app.py: +42 -31
app.py CHANGED
@@ -6,7 +6,8 @@ import spaces
 from utils.annotate import annotate_with_boxes
 from utils.models import load_models, run_inference, CHECKPOINTS
 from utils.tasks import TASK_NAMES, TASKS, OBJECT_DETECTION_TASK_NAME, \
-    CAPTION_TASK_NAMES
+    CAPTION_TASK_NAMES, CAPTION_TASK_NAME, DETAILED_CAPTION_TASK_NAME, \
+    MORE_DETAILED_CAPTION_TASK_NAME
 
 MARKDOWN = """
 # Better Florence-2 Playground 🔥
@@ -26,12 +27,14 @@ MARKDOWN = """
 </div>
 """
 
-# OBJECT_DETECTION_EXAMPLES = [
-#     ["microsoft/Florence-2-large-ft", "Object Detection", "https://media.roboflow.com/notebooks/examples/dog-2.jpeg"]
-# ]
-# CAPTION_EXAMPLES = [
-#     ["microsoft/Florence-2-large-ft", "Caption", "https://media.roboflow.com/notebooks/examples/dog-2.jpeg"]
-# ]
+OBJECT_DETECTION_EXAMPLES = [
+    ["microsoft/Florence-2-large-ft", OBJECT_DETECTION_TASK_NAME, "https://media.roboflow.com/notebooks/examples/dog-2.jpeg"]
+]
+CAPTION_EXAMPLES = [
+    ["microsoft/Florence-2-large-ft", CAPTION_TASK_NAME, "https://media.roboflow.com/notebooks/examples/dog-2.jpeg"],
+    ["microsoft/Florence-2-large-ft", DETAILED_CAPTION_TASK_NAME, "https://media.roboflow.com/notebooks/examples/dog-2.jpeg"],
+    ["microsoft/Florence-2-large-ft", MORE_DETAILED_CAPTION_TASK_NAME, "https://media.roboflow.com/notebooks/examples/dog-2.jpeg"]
+]
 
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 MODELS, PROCESSORS = load_models(DEVICE)
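
Note: each row in these lists supplies one value per component in the `inputs` of the `gr.Examples` blocks added further down, so clicking a row fills the checkpoint dropdown, the task dropdown, and the image input together. A minimal standalone sketch of that positional contract (all names here are hypothetical, not taken from app.py):

    import gradio as gr

    def echo(choice, text):
        return f"{choice}: {text}"

    # One inner list per example row; one value per component in `inputs`, in order.
    EXAMPLES = [
        ["option-a", "hello"],
        ["option-b", "world"],
    ]

    with gr.Blocks() as demo:
        choice = gr.Dropdown(choices=["option-a", "option-b"], label="Choice")
        text = gr.Textbox(label="Text")
        out = gr.Textbox(label="Output")
        gr.Examples(examples=EXAMPLES, inputs=[choice, text], outputs=out, fn=echo)

    demo.launch()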
@@ -54,6 +57,10 @@ def process(checkpoint_dropdown, task_dropdown, image_input):
     return response[task]
 
 
+image_output_component = None
+text_output_component = None
+
+
 with gr.Blocks() as demo:
     gr.Markdown(MARKDOWN)
     with gr.Row():
@@ -75,6 +82,7 @@ with gr.Blocks() as demo:
     @gr.render(inputs=task_dropdown_component)
     def show_output(text):
         if text == OBJECT_DETECTION_TASK_NAME:
+            global image_output_component
             image_output_component = gr.Image(type='pil', label='Image Output')
             submit_button_component.click(
                 fn=process,
@@ -86,6 +94,7 @@ with gr.Blocks() as demo:
                 outputs=image_output_component
             )
         elif text in CAPTION_TASK_NAMES:
+            global text_output_component
             text_output_component = gr.Textbox(label='Caption Output')
             submit_button_component.click(
                 fn=process,
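
Note: these `global` statements pair with the module-level `image_output_component = None` / `text_output_component = None` placeholders introduced above: `show_output` rebinds the module-level names so that code defined later can read whichever component was created last. A minimal sketch of the plain-Python pattern (names hypothetical; `object()` stands in for a Gradio component):

    component = None

    def build():
        global component      # rebind the module-level name, not a local
        component = object()  # stands in for gr.Image(...) / gr.Textbox(...)

    def read():
        return component      # sees whatever build() last assigned

    build()
    assert read() is not None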
@@ -97,29 +106,31 @@ with gr.Blocks() as demo:
                 outputs=text_output_component
             )
 
-    # @gr.render(inputs=task_dropdown_component)
-    # def show_examples(text):
-    #     if text == "Object Detection":
-    #         gr.Examples(
-    #             fn=process,
-    #             examples=OBJECT_DETECTION_EXAMPLES,
-    #             inputs=[
-    #                 checkpoint_dropdown_component,
-    #                 task_dropdown_component,
-    #                 image_input_component
-    #             ],
-    #             outputs=image_output_component
-    #         )
-    #     elif text == "Caption":
-    #         gr.Examples(
-    #             fn=process,
-    #             examples=CAPTION_EXAMPLES,
-    #             inputs=[
-    #                 checkpoint_dropdown_component,
-    #                 task_dropdown_component,
-    #                 image_input_component
-    #             ],
-    #             outputs=text_output_component
-    #         )
+    @gr.render(inputs=task_dropdown_component)
+    def show_examples(text):
+        if text == OBJECT_DETECTION_TASK_NAME:
+            global image_output_component
+            gr.Examples(
+                fn=process,
+                examples=OBJECT_DETECTION_EXAMPLES,
+                inputs=[
+                    checkpoint_dropdown_component,
+                    task_dropdown_component,
+                    image_input_component
+                ],
+                outputs=image_output_component
+            )
+        elif text in CAPTION_TASK_NAMES:
+            global text_output_component
+            gr.Examples(
+                fn=process,
+                examples=CAPTION_EXAMPLES,
+                inputs=[
+                    checkpoint_dropdown_component,
+                    task_dropdown_component,
+                    image_input_component
+                ],
+                outputs=text_output_component
+            )
 
 demo.launch(debug=False, show_error=True, max_threads=1)
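
Note: both the output block and the new examples block hang off `@gr.render(inputs=task_dropdown_component)`, which re-runs the decorated function and rebuilds its section of the UI whenever the watched value changes (this assumes a Gradio 4.x release that ships `gr.render`). A minimal sketch of the mechanism, independent of app.py:

    import gradio as gr

    with gr.Blocks() as demo:
        mode = gr.Dropdown(choices=["image", "text"], value="text", label="Mode")

        @gr.render(inputs=mode)
        def show_output(value):
            # Re-runs on every change of `mode`, rebuilding this section.
            if value == "image":
                gr.Image(label="Image Output")
            else:
                gr.Textbox(label="Text Output")

    demo.launch()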
 