LPX committed on
Commit 364ca1c · 1 Parent(s): 4fec5e8
Files changed (1)
  1. app.py +51 -37
app.py CHANGED
@@ -223,44 +223,58 @@ def predict_image_with_html(img, confidence_threshold, augment_methods, rotate_d
     return img_pil, html_content
 
 with gr.Blocks() as iface:
-    gr.Markdown("# AI Generated Image / Deepfake Detection Models Evaluation")
-
-    with gr.Row():
-        with gr.Column(scale=1):
-            image_input = gr.Image(label="Upload Image to Analyze", sources=['upload'], type='pil')
-            with gr.Accordion("Settings", open=False, elem_id="settings_accordion"):
-                augment_checkboxgroup = gr.CheckboxGroup(["rotate", "add_noise", "sharpen"], label="Augmentation Methods")
-                rotate_slider = gr.Slider(0, 360, value=0, step=1, label="Rotate Degrees", visible=False)
-                noise_slider = gr.Slider(0, 100, value=0, step=1, label="Noise Level", visible=False)
-                sharpen_slider = gr.Slider(0, 100, value=1, step=1, label="Sharpen Strength", visible=False)
-            confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
-            inputs = [image_input, confidence_slider, augment_checkboxgroup, rotate_slider, noise_slider, sharpen_slider]
-            predict_button = gr.Button("Predict")
-            image_output = gr.Image(label="Processed Image", visible=True)
+    with gr.Tab("AI Image Detection"):
+        gr.Markdown("# AI Generated Image / Deepfake Detection Models Evaluation")
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                image_input = gr.Image(label="Upload Image to Analyze", sources=['upload'], type='pil')
+                with gr.Accordion("Settings", open=False, elem_id="settings_accordion"):
+                    augment_checkboxgroup = gr.CheckboxGroup(["rotate", "add_noise", "sharpen"], label="Augmentation Methods")
+                    rotate_slider = gr.Slider(0, 360, value=0, step=1, label="Rotate Degrees", visible=False)
+                    noise_slider = gr.Slider(0, 100, value=0, step=1, label="Noise Level", visible=False)
+                    sharpen_slider = gr.Slider(0, 100, value=1, step=1, label="Sharpen Strength", visible=False)
+                confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
+                inputs = [image_input, confidence_slider, augment_checkboxgroup, rotate_slider, noise_slider, sharpen_slider]
+                predict_button = gr.Button("Predict")
+                image_output = gr.Image(label="Processed Image", visible=True)
 
-        with gr.Column(scale=2):
-            with gr.Accordion("Project OpenSight - Model Evaluations & Playground", open=False, elem_id="project_accordion"):
-                gr.Markdown("## OpenSight is a SOTA gen. image detection model, in pre-release prep.\n\nThis HF Space is a temporary home for us and the public to evaluate the shortcomings of current open source models.\n\n<-- Feel free to play around by starting with an image as we prepare our formal announcement.")
-            # Custom HTML component to display results in 5 columns
-            results_html = gr.HTML(label="Model Predictions")
-            outputs = [image_output, results_html]
-
-    # Show/hide rotate slider based on selected augmentation method
-    augment_checkboxgroup.change(lambda methods: gr.update(visible="rotate" in methods), inputs=[augment_checkboxgroup], outputs=[rotate_slider])
-    augment_checkboxgroup.change(lambda methods: gr.update(visible="add_noise" in methods), inputs=[augment_checkboxgroup], outputs=[noise_slider])
-    augment_checkboxgroup.change(lambda methods: gr.update(visible="sharpen" in methods), inputs=[augment_checkboxgroup], outputs=[sharpen_slider])
-
-    predict_button.click(
-        fn=predict_image_with_html,
-        inputs=inputs,
-        outputs=outputs
-    )
-    predict_button.click(
-        fn=None,
-        js="() => {document.getElementById('project_accordion').open = false;}", # Close the project accordion
-        inputs=[],
-        outputs=[]
-    )
+            with gr.Column(scale=2):
+                with gr.Accordion("Project OpenSight - Model Evaluations & Playground", open=False, elem_id="project_accordion"):
+                    gr.Markdown("## OpenSight is a SOTA gen. image detection model, in pre-release prep.\n\nThis HF Space is a temporary home for us and the public to evaluate the shortcomings of current open source models.\n\n<-- Feel free to play around by starting with an image as we prepare our formal announcement.")
+                # Custom HTML component to display results in 5 columns
+                results_html = gr.HTML(label="Model Predictions")
+                outputs = [image_output, results_html]
+
+        # Show/hide rotate slider based on selected augmentation method
+        augment_checkboxgroup.change(lambda methods: gr.update(visible="rotate" in methods), inputs=[augment_checkboxgroup], outputs=[rotate_slider])
+        augment_checkboxgroup.change(lambda methods: gr.update(visible="add_noise" in methods), inputs=[augment_checkboxgroup], outputs=[noise_slider])
+        augment_checkboxgroup.change(lambda methods: gr.update(visible="sharpen" in methods), inputs=[augment_checkboxgroup], outputs=[sharpen_slider])
+
+        predict_button.click(
+            fn=predict_image_with_html,
+            inputs=inputs,
+            outputs=outputs
+        )
+        predict_button.click(
+            fn=None,
+            js="() => {document.getElementById('project_accordion').open = false;}", # Close the project accordion
+            inputs=[],
+            outputs=[]
+        )
+
+    with gr.Tab("Another Interface"):
+        # Add components for the second interface here
+        gr.Markdown("# Another Interface")
+        # Example: Add a simple text input and output
+        text_input = gr.Textbox(label="Enter Text")
+        text_output = gr.Textbox(label="Processed Text")
+        text_button = gr.Button("Process Text")
+        text_button.click(
+            fn=lambda x: x.upper(), # Example function to convert text to uppercase
+            inputs=text_input,
+            outputs=text_output
+        )
 
 # Launch the interface
 iface.launch()
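
For reference, the structural pattern this commit introduces is nesting the existing detection UI inside gr.Tab containers within the same gr.Blocks context, plus a second placeholder tab. Below is a minimal, self-contained sketch of that pattern only, assuming a recent Gradio release; the names demo, analyze, image_in, run_btn, results, text_in, and text_out are illustrative and not taken from app.py.

# Minimal sketch of the tabbed gr.Blocks layout used in this commit.
# All identifiers here are illustrative placeholders, not from app.py.
import gradio as gr

def analyze(image):
    # Stand-in for the real predict_image_with_html pipeline.
    return "<p>model predictions would render here</p>"

with gr.Blocks() as demo:
    with gr.Tab("AI Image Detection"):
        image_in = gr.Image(label="Upload Image to Analyze", type="pil")
        run_btn = gr.Button("Predict")
        results = gr.HTML(label="Model Predictions")
        run_btn.click(fn=analyze, inputs=image_in, outputs=results)

    with gr.Tab("Another Interface"):
        text_in = gr.Textbox(label="Enter Text")
        text_out = gr.Textbox(label="Processed Text")
        gr.Button("Process Text").click(fn=lambda s: s.upper(), inputs=text_in, outputs=text_out)

if __name__ == "__main__":
    demo.launch()

The commit's second predict_button.click(fn=None, js=...) call is a JS-only callback meant to collapse the project accordion client-side; it is omitted from this sketch for brevity.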