Update app.py
app.py CHANGED
```diff
@@ -59,12 +59,12 @@ def get_results(image, prompt):
 
 
 # Define the input components for Gradio (adding a new input for the prompt)
-image_input = gr.inputs.Image()
-text_input = gr.inputs.Textbox(label="Prompt")  # New input for the text prompt
+# image_input = gr.inputs.Image()
+# text_input = gr.inputs.Textbox(label="Prompt")  # New input for the text prompt
 
 
-# Define the output components for Gradio (including both image and text)
-outputs = gr.Image(type="numpy", label="Output Image")
+# # Define the output components for Gradio (including both image and text)
+# outputs = gr.Image(type="numpy", label="Output Image")
 
 # Define the text description within an HTML <div> element
 description_html = """
@@ -105,16 +105,29 @@ title = "autoannotation"
 
 description = "This is a project description. It demonstrates how to use Gradio with an image and text input to interact with an API."
 
+import os
+examples = [
+    ["3000.jpeg", 'person,car,traffic sign,traffic light'],
+    ["original (8).jpg", 'person,car,traffic sign,traffic light'],
+    ["traffic.jpg", 'person,car,traffic sign,traffic light'],
+]
+
+
+
+
 # Create a Blocks object and use it as a context manager
 with gr.Blocks() as demo:
     gr.Markdown(
         """
         # Tuba Autoannotation Demo
-
-
+
+        This is your private demo for [Tuba Autoannotation](https://Tuba.ai)
+
+        A prompt based controllable model for auto annotation
         """
     )
     # Define the input components and add them to the layout
+
     with gr.Row():
         image_input = gr.inputs.Image()
         output = gr.Image(type="numpy", label="Output Image")
@@ -128,6 +141,12 @@ with gr.Blocks() as demo:
     # Define the event listener that connects the input and output components and triggers the function
     button.click(fn=get_results, inputs=[image_input, text_input], outputs=output, api_name="get_results")
     # Add the description below the layout
+    gr.Examples(
+        fn=get_results,
+        examples=examples,
+        inputs=[image_input, text_input],
+        outputs=[outputs]
+    )
     gr.Markdown(description_html)
     # Launch the app
     demo.launch(share=False)
```
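For orientation, below is a minimal sketch of the app this commit produces, ported to the current Gradio component API (the `gr.inputs.*` constructors used above belong to the legacy namespace that later Gradio releases removed). The body of `get_results`, the full `description_html` block, and the button label are not visible in this diff, so they are stubbed or assumed. Note also that the committed `gr.Examples` call passes `outputs=[outputs]`, a variable the same commit comments out near line 67; the sketch wires it to the `output` component from the `gr.Row()` block instead.

```python
# Minimal sketch (not the committed file): the same demo expressed with the
# current Gradio API. Assumptions are marked inline.
import gradio as gr


def get_results(image, prompt):
    # Stub: the real implementation (not shown in this diff) sends the image
    # and prompt to the annotation API and returns an annotated image.
    return image


# Placeholder: the real description_html is a larger HTML <div> defined around line 70.
description_html = "<div>This is a project description.</div>"

# Example files are assumed to be committed next to app.py, as in the diff.
examples = [
    ["3000.jpeg", "person,car,traffic sign,traffic light"],
    ["original (8).jpg", "person,car,traffic sign,traffic light"],
    ["traffic.jpg", "person,car,traffic sign,traffic light"],
]

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Tuba Autoannotation Demo

        This is your private demo for [Tuba Autoannotation](https://Tuba.ai)

        A prompt based controllable model for auto annotation
        """
    )
    with gr.Row():
        image_input = gr.Image()                     # was gr.inputs.Image()
        output = gr.Image(type="numpy", label="Output Image")
    text_input = gr.Textbox(label="Prompt")          # was gr.inputs.Textbox(label="Prompt")
    button = gr.Button("Run")                        # label assumed; only button.click appears in the diff
    button.click(
        fn=get_results,
        inputs=[image_input, text_input],
        outputs=output,
        api_name="get_results",
    )
    gr.Examples(
        fn=get_results,
        examples=examples,
        inputs=[image_input, text_input],
        outputs=[output],  # the diff passes [outputs], which this commit comments out
    )
    gr.Markdown(description_html)

demo.launch(share=False)
```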