clockclock committed on
Commit
5819ee4
·
verified ·
1 Parent(s): f070901

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +101 -77
app.py CHANGED
@@ -4,11 +4,32 @@ import numpy as np
4
  from PIL import Image
5
  import os
6
 
7
- # Load the Haar Cascade classifier for face detection
8
- face_cascade_path = os.path.join(os.path.dirname(__file__), "haarcascade_frontalface_default.xml")
9
- face_cascade = cv2.CascadeClassifier(face_cascade_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- def process_image(image, x, y, effect_type):
 
 
 
 
12
  if image is None:
13
  return None, "Please upload an image first."
14
 
@@ -16,55 +37,53 @@ def process_image(image, x, y, effect_type):
16
  img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
17
  processed_img_np_bgr = img_np_bgr.copy()
18
 
19
- gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
20
- faces = face_cascade.detectMultiScale(gray, 1.1, 4)
 
 
 
 
 
 
 
21
 
22
- target_roi = None
23
- target_x, target_y, target_w, target_h = None, None, None, None
24
  status_message = ""
25
 
26
- # Find the face closest to the clicked coordinates
27
- if x is not None and y is not None:
28
- min_distance = float('inf')
29
- for (fx, fy, fw, fh) in faces:
30
- # Calculate center of the face
31
- face_center_x = fx + fw // 2
32
- face_center_y = fy + fh // 2
33
- distance = np.sqrt((face_center_x - x)**2 + (face_center_y - y)**2)
34
- if distance < min_distance and distance < 100: # Only consider faces within 100 pixels
35
- min_distance = distance
36
- target_x, target_y, target_w, target_h = fx, fy, fw, fh
37
-
38
- if target_x is not None:
39
- # Apply effect to the detected face
40
- roi = processed_img_np_bgr[target_y:target_y+target_h, target_x:target_x+target_w]
41
- status_message = f"Applied {effect_type} effect to detected face."
42
-
43
- if effect_type == "blur":
44
- processed_roi = cv2.GaussianBlur(roi, (35, 35), 0)
45
- elif effect_type == "sharpen":
46
- kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
47
- processed_roi = cv2.filter2D(roi, -1, kernel)
48
- elif effect_type == "grayscale":
49
- processed_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
50
- processed_roi = cv2.cvtColor(processed_roi, cv2.COLOR_GRAY2BGR)
51
- elif effect_type == "pixelate":
52
- h, w = roi.shape[:2]
53
- temp = cv2.resize(roi, (w//10, h//10), interpolation=cv2.INTER_LINEAR)
54
- processed_roi = cv2.resize(temp, (w, h), interpolation=cv2.INTER_NEAREST)
55
- else:
56
- processed_roi = roi # No effect
57
-
58
- processed_img_np_bgr[target_y:target_y+target_h, target_x:target_x+target_w] = processed_roi
59
- elif x is not None and y is not None: # If no face detected near click, apply to a general region
60
- region_size = 100
61
- x1 = max(0, x - region_size // 2)
62
- y1 = max(0, y - region_size // 2)
63
- x2 = min(image.width, x + region_size // 2)
64
- y2 = min(image.height, y + region_size // 2)
65
 
66
  roi = processed_img_np_bgr[y1:y2, x1:x2]
67
- status_message = f"Applied {effect_type} effect to clicked region."
68
 
69
  if effect_type == "blur":
70
  processed_roi = cv2.GaussianBlur(roi, (15, 15), 0)
@@ -75,15 +94,18 @@ def process_image(image, x, y, effect_type):
75
  processed_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
76
  processed_roi = cv2.cvtColor(processed_roi, cv2.COLOR_GRAY2BGR)
77
  elif effect_type == "pixelate":
78
- h, w = roi.shape[:2]
79
- temp = cv2.resize(roi, (w//10, h//10), interpolation=cv2.INTER_LINEAR)
80
- processed_roi = cv2.resize(temp, (w, h), interpolation=cv2.INTER_NEAREST)
81
  else:
82
- processed_roi = roi # No effect
83
 
84
- processed_img_np_bgr[y1:y1+roi.shape[0], x1:x1+roi.shape[1]] = processed_roi
85
- else:
86
- status_message = "Please click on the image to select a region."
 
 
 
87
 
88
  img_pil = Image.fromarray(cv2.cvtColor(processed_img_np_bgr, cv2.COLOR_BGR2RGB))
89
  return img_pil, status_message
@@ -92,11 +114,18 @@ def detect_faces_only(image):
92
  if image is None:
93
  return None, "Please upload an image first."
94
 
 
 
 
95
  img_np = np.array(image)
96
  img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
97
 
98
  gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
99
- faces = face_cascade.detectMultiScale(gray, 1.1, 4)
 
 
 
 
100
 
101
  # Draw rectangles around detected faces
102
  for (x, y, w, h) in faces:
@@ -128,17 +157,19 @@ css = """
128
  with gr.Blocks(css=css, title="AI Image Editor") as demo:
129
  gr.HTML("<h1 class='main-header'>🎨 AI Image Editor (CPU-friendly)</h1>")
130
 
131
- gr.HTML("""
 
 
132
  <div class='instruction-text'>
 
133
  <strong>Instructions:</strong>
134
  <ol>
135
  <li>Upload an image using the file uploader</li>
136
- <li>Click on any part of the image to select a region</li>
137
  <li>Choose an effect from the dropdown menu</li>
138
- <li>Click "Apply Effect" to process the selected region</li>
139
- <li>Use "Detect Faces" to see all detected faces with blue rectangles</li>
140
  </ol>
141
- <em>Note: The app will prioritize faces near your click location, or apply effects to a general region if no face is detected nearby.</em>
142
  </div>
143
  """)
144
 
@@ -160,7 +191,8 @@ with gr.Blocks(css=css, title="AI Image Editor") as demo:
160
 
161
  with gr.Row():
162
  process_button = gr.Button("✨ Apply Effect", variant="primary", size="lg")
163
- detect_button = gr.Button("👤 Detect Faces", variant="secondary", size="lg")
 
164
 
165
  status_text = gr.Textbox(
166
  label="📊 Status",
@@ -175,26 +207,18 @@ with gr.Blocks(css=css, title="AI Image Editor") as demo:
175
  height=400
176
  )
177
 
178
- # Store click coordinates
179
- clicked_x = gr.State(None)
180
- clicked_y = gr.State(None)
181
-
182
- def get_coords(evt: gr.SelectData):
183
- return evt.index[0], evt.index[1]
184
-
185
- input_image.select(get_coords, None, [clicked_x, clicked_y])
186
-
187
  process_button.click(
188
  fn=process_image,
189
- inputs=[input_image, clicked_x, clicked_y, effect_dropdown],
190
  outputs=[output_image, status_text]
191
  )
192
 
193
- detect_button.click(
194
- fn=detect_faces_only,
195
- inputs=[input_image],
196
- outputs=[output_image, status_text]
197
- )
 
198
 
199
  gr.HTML("""
200
  <div style='text-align: center; margin-top: 20px; color: #6c757d;'>
 
4
  from PIL import Image
5
  import os
6
 
7
+ # Try to load the Haar Cascade classifier for face detection
8
+ # Handle the case where the file might not be found
9
+ face_cascade = None
10
+ cascade_paths = [
11
+ "haarcascade_frontalface_default.xml",
12
+ "./haarcascade_frontalface_default.xml",
13
+ os.path.join(os.path.dirname(__file__), "haarcascade_frontalface_default.xml"),
14
+ cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
15
+ ]
16
+
17
+ for path in cascade_paths:
18
+ if os.path.exists(path):
19
+ face_cascade = cv2.CascadeClassifier(path)
20
+ if not face_cascade.empty():
21
+ print(f"Successfully loaded Haar Cascade from: {path}")
22
+ break
23
+ else:
24
+ print(f"Failed to load Haar Cascade from: {path}")
25
+ else:
26
+ print(f"File not found: {path}")
27
 
28
+ if face_cascade is None or face_cascade.empty():
29
+ print("Warning: Could not load Haar Cascade classifier. Face detection will be disabled.")
30
+ face_cascade = None
31
+
32
+ def process_image(image, effect_type):
33
  if image is None:
34
  return None, "Please upload an image first."
35
 
 
37
  img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
38
  processed_img_np_bgr = img_np_bgr.copy()
39
 
40
+ # Only try face detection if cascade is loaded
41
+ faces = []
42
+ if face_cascade is not None:
43
+ gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
44
+ try:
45
+ faces = face_cascade.detectMultiScale(gray, 1.1, 4)
46
+ except Exception as e:
47
+ print(f"Face detection error: {e}")
48
+ faces = []
49
 
 
 
50
  status_message = ""
51
 
52
+ if len(faces) > 0:
53
+ # Apply effect to all detected faces
54
+ for (x, y, w, h) in faces:
55
+ roi = processed_img_np_bgr[y:y+h, x:x+w]
56
+
57
+ if effect_type == "blur":
58
+ processed_roi = cv2.GaussianBlur(roi, (35, 35), 0)
59
+ elif effect_type == "sharpen":
60
+ kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
61
+ processed_roi = cv2.filter2D(roi, -1, kernel)
62
+ elif effect_type == "grayscale":
63
+ processed_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
64
+ processed_roi = cv2.cvtColor(processed_roi, cv2.COLOR_GRAY2BGR)
65
+ elif effect_type == "pixelate":
66
+ h_roi, w_roi = roi.shape[:2]
67
+ temp = cv2.resize(roi, (w_roi//10, h_roi//10), interpolation=cv2.INTER_LINEAR)
68
+ processed_roi = cv2.resize(temp, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
69
+ else:
70
+ processed_roi = roi
71
+
72
+ processed_img_np_bgr[y:y+h, x:x+w] = processed_roi
73
+
74
+ status_message = f"Applied {effect_type} effect to {len(faces)} detected face(s)."
75
+ else:
76
+ # Apply effect to center region if no faces detected
77
+ h, w = img_np_bgr.shape[:2]
78
+ center_x, center_y = w // 2, h // 2
79
+ region_size = min(200, w//3, h//3)
80
+
81
+ x1 = max(0, center_x - region_size // 2)
82
+ y1 = max(0, center_y - region_size // 2)
83
+ x2 = min(w, center_x + region_size // 2)
84
+ y2 = min(h, center_y + region_size // 2)
 
 
 
 
 
 
85
 
86
  roi = processed_img_np_bgr[y1:y2, x1:x2]
 
87
 
88
  if effect_type == "blur":
89
  processed_roi = cv2.GaussianBlur(roi, (15, 15), 0)
 
94
  processed_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
95
  processed_roi = cv2.cvtColor(processed_roi, cv2.COLOR_GRAY2BGR)
96
  elif effect_type == "pixelate":
97
+ h_roi, w_roi = roi.shape[:2]
98
+ temp = cv2.resize(roi, (w_roi//10, h_roi//10), interpolation=cv2.INTER_LINEAR)
99
+ processed_roi = cv2.resize(temp, (w_roi, h_roi), interpolation=cv2.INTER_NEAREST)
100
  else:
101
+ processed_roi = roi
102
 
103
+ processed_img_np_bgr[y1:y2, x1:x2] = processed_roi
104
+
105
+ if face_cascade is None:
106
+ status_message = f"Applied {effect_type} effect to center region (face detection unavailable)."
107
+ else:
108
+ status_message = f"No faces detected. Applied {effect_type} effect to center region."
109
 
110
  img_pil = Image.fromarray(cv2.cvtColor(processed_img_np_bgr, cv2.COLOR_BGR2RGB))
111
  return img_pil, status_message
 
114
  if image is None:
115
  return None, "Please upload an image first."
116
 
117
+ if face_cascade is None:
118
+ return image, "Face detection is not available (Haar Cascade not loaded)."
119
+
120
  img_np = np.array(image)
121
  img_np_bgr = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
122
 
123
  gray = cv2.cvtColor(img_np_bgr, cv2.COLOR_BGR2GRAY)
124
+
125
+ try:
126
+ faces = face_cascade.detectMultiScale(gray, 1.1, 4)
127
+ except Exception as e:
128
+ return image, f"Face detection error: {str(e)}"
129
 
130
  # Draw rectangles around detected faces
131
  for (x, y, w, h) in faces:
 
157
  with gr.Blocks(css=css, title="AI Image Editor") as demo:
158
  gr.HTML("<h1 class='main-header'>🎨 AI Image Editor (CPU-friendly)</h1>")
159
 
160
+ face_detection_status = "✅ Face detection enabled" if face_cascade is not None else "⚠️ Face detection disabled (Haar Cascade not found)"
161
+
162
+ gr.HTML(f"""
163
  <div class='instruction-text'>
164
+ <strong>Status:</strong> {face_detection_status}<br><br>
165
  <strong>Instructions:</strong>
166
  <ol>
167
  <li>Upload an image using the file uploader</li>
 
168
  <li>Choose an effect from the dropdown menu</li>
169
+ <li>Click "Apply Effect" to process the image</li>
170
+ <li>If face detection is available, use "Detect Faces" to see detected faces</li>
171
  </ol>
172
+ <em>Note: If face detection is available, effects will be applied to detected faces. Otherwise, effects will be applied to the center region.</em>
173
  </div>
174
  """)
175
 
 
191
 
192
  with gr.Row():
193
  process_button = gr.Button("✨ Apply Effect", variant="primary", size="lg")
194
+ if face_cascade is not None:
195
+ detect_button = gr.Button("👤 Detect Faces", variant="secondary", size="lg")
196
 
197
  status_text = gr.Textbox(
198
  label="📊 Status",
 
207
  height=400
208
  )
209
 
 
 
 
 
 
 
 
 
 
210
  process_button.click(
211
  fn=process_image,
212
+ inputs=[input_image, effect_dropdown],
213
  outputs=[output_image, status_text]
214
  )
215
 
216
+ if face_cascade is not None:
217
+ detect_button.click(
218
+ fn=detect_faces_only,
219
+ inputs=[input_image],
220
+ outputs=[output_image, status_text]
221
+ )
222
 
223
  gr.HTML("""
224
  <div style='text-align: center; margin-top: 20px; color: #6c757d;'>