Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,126 +1,97 @@
|
|
1 |
import gradio as gr
|
2 |
|
3 |
import gradio as gr
|
4 |
-
import cv2
|
5 |
import numpy as np
|
6 |
-
from PIL import Image
|
7 |
import time
|
8 |
|
9 |
-
|
10 |
-
|
11 |
-
#
|
12 |
-
|
13 |
-
# def preprocess_image(img):
|
14 |
-
# # Your preprocessing code
|
15 |
-
# pass
|
16 |
-
# def predict_spoof(img):
|
17 |
-
# # Your prediction code
|
18 |
-
# pass
|
19 |
|
20 |
-
def
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
# Add overlay text to frame
|
30 |
-
text = f"Prediction: {result['prediction']}"
|
31 |
-
conf_text = f"Confidence: {result['confidence']:.2f}"
|
32 |
-
|
33 |
-
# Create a semi-transparent overlay
|
34 |
-
overlay = frame_rgb.copy()
|
35 |
-
cv2.rectangle(overlay, (10, 10), (300, 80), (0, 0, 0), -1)
|
36 |
-
frame_rgb = cv2.addWeighted(overlay, 0.3, frame_rgb, 0.7, 0)
|
37 |
-
|
38 |
-
# Add text
|
39 |
-
cv2.putText(frame_rgb, text, (20, 40), cv2.FONT_HERSHEY_SIMPLEX,
|
40 |
-
0.8, (255, 255, 255), 2)
|
41 |
-
cv2.putText(frame_rgb, conf_text, (20, 70), cv2.FONT_HERSHEY_SIMPLEX,
|
42 |
-
0.8, (255, 255, 255), 2)
|
43 |
-
|
44 |
-
return frame_rgb
|
45 |
-
|
46 |
-
def process_uploaded_image(image):
|
47 |
-
# Convert to RGB if needed
|
48 |
-
if isinstance(image, np.ndarray):
|
49 |
-
if len(image.shape) == 3 and image.shape[2] == 3:
|
50 |
-
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
51 |
-
else:
|
52 |
-
image_rgb = image
|
53 |
-
else:
|
54 |
-
image_rgb = np.array(image)
|
55 |
-
|
56 |
-
# Make prediction (implement with your model)
|
57 |
-
# result = predict_spoof(image_rgb)
|
58 |
-
# Placeholder result
|
59 |
-
result = {"prediction": "Spoof", "confidence": 0.88}
|
60 |
-
|
61 |
-
# Create output image with overlay
|
62 |
-
output_img = image_rgb.copy()
|
63 |
-
h, w = output_img.shape[:2]
|
64 |
-
|
65 |
-
# Add semi-transparent overlay
|
66 |
-
overlay = output_img.copy()
|
67 |
-
cv2.rectangle(overlay, (10, 10), (300, 80), (0, 0, 0), -1)
|
68 |
-
output_img = cv2.addWeighted(overlay, 0.3, output_img, 0.7, 0)
|
69 |
-
|
70 |
-
# Add prediction text
|
71 |
-
text = f"Prediction: {result['prediction']}"
|
72 |
-
conf_text = f"Confidence: {result['confidence']:.2f}"
|
73 |
-
|
74 |
-
cv2.putText(output_img, text, (20, 40), cv2.FONT_HERSHEY_SIMPLEX,
|
75 |
-
0.8, (255, 255, 255), 2)
|
76 |
-
cv2.putText(output_img, conf_text, (20, 70), cv2.FONT_HERSHEY_SIMPLEX,
|
77 |
-
0.8, (255, 255, 255), 2)
|
78 |
-
|
79 |
-
return output_img
|
80 |
|
81 |
-
# Create Gradio interface
|
82 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
|
83 |
gr.Markdown("""
|
84 |
-
# Face Spoofing Detection
|
85 |
-
|
86 |
""")
|
87 |
|
|
|
88 |
with gr.Tabs():
|
|
|
89 |
with gr.Tab("Webcam Detection"):
|
90 |
with gr.Row():
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
|
|
99 |
|
|
|
100 |
with gr.Tab("Image Upload"):
|
101 |
with gr.Row():
|
102 |
-
|
103 |
-
|
|
|
|
|
|
|
|
|
|
|
104 |
|
105 |
-
image_button = gr.Button("
|
106 |
-
image_button.click(
|
107 |
-
process_uploaded_image,
|
108 |
-
inputs=image_input,
|
109 |
-
outputs=image_output
|
110 |
-
)
|
111 |
|
112 |
-
|
113 |
-
|
114 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
121 |
|
|
|
122 |
if __name__ == "__main__":
|
123 |
demo.launch(share=True)
|
124 |
-
|
125 |
# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
|
126 |
# demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
|
3 |
import gradio as gr
|
|
|
4 |
import numpy as np
|
|
|
5 |
import time
|
6 |
|
7 |
+
def placeholder_process(image, delay=1.0):
    """Placeholder detector — replace with the real backend integration.

    Args:
        image: Input image; passed through unchanged.
        delay: Seconds to sleep to simulate processing latency. Defaults
            to 1.0 to preserve the original behavior; pass 0 to skip the
            artificial wait (e.g. in tests).

    Returns:
        tuple: ``(image, label, confidence)`` with a hard-coded
        ``("Real", 0.95)`` placeholder result.
    """
    time.sleep(delay)  # Simulate processing time
    return image, "Real", 0.95
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
+
def format_result(image, label, confidence):
    """Bundle a detection outcome into UI-ready values.

    Args:
        image: The analyzed image, returned unchanged for display.
        label: Detection label (e.g. "Real" or "Spoof").
        confidence: Confidence score in [0, 1].

    Returns:
        tuple: the image, a labelled detection string, a percent-formatted
        confidence string, and a component update making the alert visible.
    """
    detection_text = f"Detection Result: {label}"
    confidence_text = f"Confidence Score: {confidence:.2%}"
    alert_update = gr.update(visible=True)
    return image, detection_text, confidence_text, alert_update
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
|
|
21 |
# Build the Gradio UI.
# NOTE(review): `gr.Alert` is not a documented Gradio component, and
# `gr.Image(source="webcam", streaming=True)` is Gradio 3.x syntax
# (removed in 4.x) — confirm the pinned gradio version before deploying.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Header
    gr.Markdown("""
    # Face Spoofing Detection System
    ### Detect real vs spoofed faces in real-time or from uploaded images
    """)

    # Main Interface
    with gr.Tabs():
        # Webcam Tab
        with gr.Tab("Webcam Detection"):
            with gr.Row():
                with gr.Column(scale=2):
                    webcam = gr.Image(source="webcam", streaming=True, label="Webcam Feed")
                with gr.Column(scale=1):
                    webcam_status = gr.Textbox(label="Status", value="Ready", interactive=False)
                    webcam_result = gr.Textbox(label="Detection", interactive=False)
                    webcam_conf = gr.Textbox(label="Confidence", interactive=False)
                    webcam_alert = gr.Alert(visible=False)

            webcam_button = gr.Button("Start Detection", variant="primary")

        # Image Upload Tab
        with gr.Tab("Image Upload"):
            with gr.Row():
                with gr.Column(scale=2):
                    image_input = gr.Image(label="Upload Image", type="numpy")
                with gr.Column(scale=1):
                    image_status = gr.Textbox(label="Status", value="Ready", interactive=False)
                    image_result = gr.Textbox(label="Detection", interactive=False)
                    image_conf = gr.Textbox(label="Confidence", interactive=False)
                    image_alert = gr.Alert(visible=False)

            image_button = gr.Button("Analyze Image", variant="primary")

    # Info Section
    with gr.Accordion("Information", open=False):
        gr.Markdown("""
        ### How to Use
        1. Choose either Webcam or Image Upload mode
        2. For webcam: Click 'Start Detection' to begin real-time analysis
        3. For images: Upload an image and click 'Analyze Image'

        ### Best Practices
        - Ensure good lighting conditions
        - Position face clearly in the frame
        - Keep steady and avoid rapid movements
        - For best results, maintain a distance of 30-60cm from the camera

        ### System Requirements
        - Webcam with minimum 720p resolution (for live detection)
        - Stable internet connection
        - Supported browsers: Chrome, Firefox, Safari
        """)

    # Event handlers
    def update_status(is_webcam=True):
        """Return a progress message for the active input mode."""
        prefix = "Webcam" if is_webcam else "Image"
        return f"{prefix} analysis in progress..."

    def _analyze(img, is_webcam):
        """Run the placeholder detector and fan its result out to the five
        output components: (status, image, detection, confidence, alert).

        BUG FIX: the original lambdas returned only 4 values
        (``update_status`` plus the 3-tuple from ``placeholder_process``)
        against a 5-element ``outputs=`` list — ``format_result``, which
        supplies the 5th value (the ``gr.update(visible=True)`` for the
        alert component), was defined but never called. Routing through
        ``format_result`` makes the return arity match the outputs.
        """
        return (update_status(is_webcam), *format_result(*placeholder_process(img)))

    image_button.click(
        fn=lambda img: _analyze(img, False),
        inputs=[image_input],
        outputs=[image_status, image_input, image_result, image_conf, image_alert],
    )

    webcam_button.click(
        fn=lambda img: _analyze(img, True),
        inputs=[webcam],
        outputs=[webcam_status, webcam, webcam_result, webcam_conf, webcam_alert],
    )

# Launch the interface
if __name__ == "__main__":
    demo.launch(share=True)

# demo = gr.Interface(fn=greet, inputs="text", outputs="text")
# demo.launch()
|