abhisheksan committed on
Commit
be986b5
·
1 Parent(s): 8487406

Refactor face manipulation detection in forgery_routes.py and improve error handling in forgery_image_utils.py

Browse files
app/api/forgery_routes.py CHANGED
@@ -84,7 +84,10 @@ async def process_image(firebase_filename: str):
84
  results["face_manipulation"] = face_manipulation_service.detect_manipulation(firebase_filename)
85
  logging.info(f"Face manipulation detection result: {results['face_manipulation']}")
86
  else:
87
- results["face_manipulation"] = None
 
 
 
88
  logging.info("Face manipulation detection skipped (no face detected)")
89
  logging.info(f"Image processing completed for: {firebase_filename}")
90
  return results
@@ -132,7 +135,10 @@ async def process_video(firebase_filename: str):
132
  "collective_detection": False,
133
  "collective_confidence": 0.0
134
  },
135
- "face_manipulation": None,
 
 
 
136
  "gan_detection": {
137
  "collective_detection": False,
138
  "collective_confidence": 0.0
 
84
  results["face_manipulation"] = face_manipulation_service.detect_manipulation(firebase_filename)
85
  logging.info(f"Face manipulation detection result: {results['face_manipulation']}")
86
  else:
87
+ results["face_manipulation"] = {
88
+ "is_manipulated": False,
89
+ "confidence": "0%"
90
+ }
91
  logging.info("Face manipulation detection skipped (no face detected)")
92
  logging.info(f"Image processing completed for: {firebase_filename}")
93
  return results
 
135
  "collective_detection": False,
136
  "collective_confidence": 0.0
137
  },
138
+ "face_manipulation": {
139
+ "collective_detection": False,
140
+ "collective_confidence": 0.0
141
+ },
142
  "gan_detection": {
143
  "collective_detection": False,
144
  "collective_confidence": 0.0
app/utils/forgery_image_utils.py CHANGED
@@ -4,11 +4,44 @@ from typing import Union
4
  from PIL import Image
5
  from io import BytesIO
6
  import imghdr
 
 
7
  from fastapi import HTTPException
 
8
  from app.utils.file_utils import get_file_content
 
9
 
10
  SUPPORTED_IMAGE_FORMATS = ['.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff', '.webp']
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  def verify_image_format(firebase_filename: str):
13
  content = get_file_content(firebase_filename)
14
  file_ext = '.' + (imghdr.what(BytesIO(content)) or '')
@@ -38,19 +71,29 @@ def strip_metadata(img: Image.Image) -> Image.Image:
38
  img_without_exif.putdata(data)
39
  return img_without_exif
40
 
41
- def detect_face(image_content: bytes) -> bool:
42
  """
43
  Enhanced face detection using cascaded classifiers.
44
  Args:
45
- image_content: Raw image bytes
46
  Returns:
47
  bool: True if any faces are detected, False otherwise
48
  """
49
  try:
50
- # Decode image
51
- nparr = np.frombuffer(image_content, np.uint8)
52
- img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
 
 
 
 
 
 
 
 
 
53
  if img is None:
 
54
  return False
55
 
56
  # Convert to grayscale
@@ -60,40 +103,20 @@ def detect_face(image_content: bytes) -> bool:
60
  clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
61
  enhanced_gray = clahe.apply(gray)
62
 
63
- # Try frontal face detection first
64
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
65
- faces = face_cascade.detectMultiScale(
66
- enhanced_gray,
67
- scaleFactor=1.1,
68
- minNeighbors=4,
69
- minSize=(30, 30)
70
- )
71
-
72
- if len(faces) > 0:
73
- return True
74
-
75
- # Try alternate frontal face classifier
76
- alt_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt2.xml')
77
- faces = alt_cascade.detectMultiScale(
78
- enhanced_gray,
79
- scaleFactor=1.15,
80
- minNeighbors=3,
81
- minSize=(30, 30)
82
- )
83
 
84
- if len(faces) > 0:
85
- return True
86
-
87
- # Try profile face detection as last resort
88
- profile_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_profileface.xml')
89
- faces = profile_cascade.detectMultiScale(
90
- enhanced_gray,
91
- scaleFactor=1.1,
92
- minNeighbors=3,
93
- minSize=(30, 30)
94
- )
95
-
96
- return len(faces) > 0
97
-
98
- except Exception:
99
  return False
 
4
  from PIL import Image
5
  from io import BytesIO
6
  import imghdr
7
+ import cv2
8
+ import os
9
  from fastapi import HTTPException
10
+ import io
11
  from app.utils.file_utils import get_file_content
12
+ import logging
13
 
14
  SUPPORTED_IMAGE_FORMATS = ['.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tiff', '.webp']
15
 
16
# Set up logging
# NOTE(review): logging.basicConfig() in a utility module configures the root
# logger as an import side effect; kept for compatibility, but consider moving
# it to the application entry point.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Resolve <project_root>/models relative to this file so the cascade XMLs are
# found regardless of the process working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(current_dir))
xml_paths = {
    name: os.path.join(project_root, 'models', filename)
    for name, filename in (
        ('frontal', 'haarcascade_frontalface_default.xml'),
        ('frontal_alt', 'haarcascade_frontalface_alt2.xml'),
        ('profile', 'haarcascade_profileface.xml'),
    )
}

# Try to load the pre-trained face detection models. Each failure is logged
# and skipped, so one missing/corrupt XML disables a single classifier rather
# than breaking the module import.
face_cascades = {}
for name, path in xml_paths.items():
    try:
        if not os.path.exists(path):
            logger.error(f"Error: XML file not found at {path}")
            continue
        cascade = cv2.CascadeClassifier(path)
        if cascade.empty():
            logger.error(f"Error: Unable to load the cascade classifier. XML file is empty or invalid: {path}")
            continue
        face_cascades[name] = cascade
        logger.info(f"Successfully loaded face detection model from: {path}")
    except Exception as e:
        logger.error(f"Error loading face detection model {name}: {str(e)}")
44
+
45
  def verify_image_format(firebase_filename: str):
46
  content = get_file_content(firebase_filename)
47
  file_ext = '.' + (imghdr.what(BytesIO(content)) or '')
 
71
  img_without_exif.putdata(data)
72
  return img_without_exif
73
 
74
def detect_face(image_input) -> bool:
    """
    Enhanced face detection using cascaded classifiers.

    Args:
        image_input: Either raw image bytes or a filename (a path readable
            by cv2.imread).

    Returns:
        bool: True if any faces are detected, False otherwise. Any error
        (invalid input type, undecodable image, classifier failure) is
        logged and reported as False rather than raised.
    """
    try:
        # Determine if the input is bytes or a filename
        if isinstance(image_input, bytes):
            # Decode image from bytes
            nparr = np.frombuffer(image_input, np.uint8)
            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        elif isinstance(image_input, str):
            # Read image from file
            img = cv2.imread(image_input)
        else:
            logger.error("Invalid input type for detect_face")
            return False

        if img is None:
            logger.error("Failed to load image in detect_face")
            return False

        # Convert to grayscale and boost local contrast with CLAHE so the
        # Haar cascades cope better with uneven lighting.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        enhanced_gray = clahe.apply(gray)

        # Fix: if every cascade XML failed to load at import time, the loop
        # below would silently log "No face detected" — make that failure
        # mode explicit so operators can tell "no face" from "no classifiers".
        if not face_cascades:
            logger.warning("No face cascade classifiers are loaded; face detection is disabled")
            return False

        # Try each cascade classifier in turn; first hit wins.
        for name, cascade in face_cascades.items():
            faces = cascade.detectMultiScale(
                enhanced_gray,
                scaleFactor=1.1,
                minNeighbors=4,
                minSize=(30, 30)
            )
            if len(faces) > 0:
                logger.info(f"Face detected using {name} classifier")
                return True

        logger.info("No face detected")
        return False
    except Exception as e:
        logger.error(f"Error in detect_face: {str(e)}")
        return False
models/haarcascade_frontalface_alt2.xml ADDED
The diff for this file is too large to render. See raw diff
 
models/haarcascade_frontalface_default.xml ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
import numpy as np
from fastapi import FastAPI, HTTPException
import requests
from PIL import Image
from io import BytesIO

app = FastAPI()

# Load the pre-trained frontal-face Haar cascade bundled with the OpenCV
# installation (cv2.data.haarcascades points at its data directory).
# NOTE(review): per this commit, the script itself is stored at
# models/haarcascade_frontalface_default.xml — that path should hold cascade
# XML data, not Python source; verify the file placement.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
12
+
13
def detect_face(image):
    """Return True when the frontal-face cascade finds at least one face in
    *image* (an OpenCV BGR array)."""
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    hits = face_cascade.detectMultiScale(
        grayscale, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
    )
    return len(hits) > 0
21
+
22
@app.get("/detect_face/")
async def detect_face_in_url(image_url: str):
    """Download the image at *image_url* and report whether a face is detected.

    Returns:
        dict: {"face_detected": bool}

    Raises:
        HTTPException: 400 with the underlying error message on any
        download, decode, or detection failure.
    """
    # NOTE(review): requests is a blocking client inside an async endpoint,
    # so this handler blocks the event loop while downloading — acceptable
    # for a demo, but consider httpx/run_in_executor for production.
    try:
        # Download the image from the URL.
        # Fix: bound the download so a stalled server cannot hang the request
        # forever, and fail fast on HTTP errors instead of handing an error
        # page's bytes to PIL.
        response = requests.get(image_url, timeout=10)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))

        # Convert PIL Image (RGB) to OpenCV format (BGR)
        opencv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

        # Perform face detection
        face_detected = detect_face(opencv_image)

        return {"face_detected": face_detected}
    except Exception as e:
        # Broad catch is deliberate: any failure maps to a client-visible 400.
        raise HTTPException(status_code=400, detail=str(e))

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
models/haarcascade_profileface.xml ADDED
The diff for this file is too large to render. See raw diff