LPX committed
Commit 364ca1c
Parent(s): 4fec5e8
test: ui
app.py CHANGED
@@ -223,44 +223,58 @@ def predict_image_with_html(img, confidence_threshold, augment_methods, rotate_d
     return img_pil, html_content
 
 with gr.Blocks() as iface:
-    gr.
-    with gr.
+    with gr.Tab("AI Image Detection"):
+        gr.Markdown("# AI Generated Image / Deepfake Detection Models Evaluation")
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                image_input = gr.Image(label="Upload Image to Analyze", sources=['upload'], type='pil')
+                with gr.Accordion("Settings", open=False, elem_id="settings_accordion"):
+                    augment_checkboxgroup = gr.CheckboxGroup(["rotate", "add_noise", "sharpen"], label="Augmentation Methods")
+                    rotate_slider = gr.Slider(0, 360, value=0, step=1, label="Rotate Degrees", visible=False)
+                    noise_slider = gr.Slider(0, 100, value=0, step=1, label="Noise Level", visible=False)
+                    sharpen_slider = gr.Slider(0, 100, value=1, step=1, label="Sharpen Strength", visible=False)
+                confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
+                inputs = [image_input, confidence_slider, augment_checkboxgroup, rotate_slider, noise_slider, sharpen_slider]
+                predict_button = gr.Button("Predict")
+                image_output = gr.Image(label="Processed Image", visible=True)
 
+            with gr.Column(scale=2):
+                with gr.Accordion("Project OpenSight - Model Evaluations & Playground", open=False, elem_id="project_accordion"):
+                    gr.Markdown("## OpenSight is a SOTA gen. image detection model, in pre-release prep.\n\nThis HF Space is a temporary home for us and the public to evaluate the shortcomings of current open source models.\n\n<-- Feel free to play around by starting with an image as we prepare our formal announcement.")
+                # Custom HTML component to display results in 5 columns
+                results_html = gr.HTML(label="Model Predictions")
+                outputs = [image_output, results_html]
+
+        # Show/hide each augmentation slider based on the selected augmentation methods
+        augment_checkboxgroup.change(lambda methods: gr.update(visible="rotate" in methods), inputs=[augment_checkboxgroup], outputs=[rotate_slider])
+        augment_checkboxgroup.change(lambda methods: gr.update(visible="add_noise" in methods), inputs=[augment_checkboxgroup], outputs=[noise_slider])
+        augment_checkboxgroup.change(lambda methods: gr.update(visible="sharpen" in methods), inputs=[augment_checkboxgroup], outputs=[sharpen_slider])
+
+        predict_button.click(
+            fn=predict_image_with_html,
+            inputs=inputs,
+            outputs=outputs
+        )
+        predict_button.click(
+            fn=None,
+            js="() => {document.getElementById('project_accordion').open = false;}",  # Close the project accordion
+            inputs=[],
+            outputs=[]
+        )
+
+    with gr.Tab("Another Interface"):
+        # Add components for the second interface here
+        gr.Markdown("# Another Interface")
+        # Example: a simple text input and output
+        text_input = gr.Textbox(label="Enter Text")
+        text_output = gr.Textbox(label="Processed Text")
+        text_button = gr.Button("Process Text")
+        text_button.click(
+            fn=lambda x: x.upper(),  # Example function to convert text to uppercase
+            inputs=text_input,
+            outputs=text_output
+        )
 
 # Launch the interface
 iface.launch()
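
For reference, below is a minimal standalone sketch of the show/hide pattern the added code relies on: a CheckboxGroup.change listener that returns gr.update(visible=...) to toggle a slider. It assumes a Gradio 4.x environment (consistent with the sources= and js= arguments in the diff above); the component names here are illustrative and not taken from app.py beyond the labels shown.

import gradio as gr

with gr.Blocks() as demo:
    methods = gr.CheckboxGroup(["rotate", "add_noise", "sharpen"], label="Augmentation Methods")
    # Hidden until its method is ticked, mirroring rotate_slider in the commit above.
    rotate = gr.Slider(0, 360, value=0, step=1, label="Rotate Degrees", visible=False)

    # Returning gr.update(visible=...) from the change handler shows or hides the slider
    # without rebuilding it; one listener per slider keeps each toggle independent.
    methods.change(lambda m: gr.update(visible="rotate" in m),
                   inputs=[methods], outputs=[rotate])

demo.launch()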