aaappp7878 committed
Commit 443a961 · verified · 1 Parent(s): 2865491

Update app.py

Files changed (1):
  1. app.py +556 -993
app.py CHANGED
@@ -1,970 +1,596 @@
  import gradio as gr
  import torch
- from PIL import Image
  import numpy as np
  import cv2
- from transformers import AutoImageProcessor, AutoModelForImageClassification
  from skimage.feature import graycomatrix, graycoprops, local_binary_pattern
- from scipy import ndimage, stats

- # Load the detection models
- models = {
-     "model1": {
-         "name": "umm-maybe/AI-image-detector",
-         "processor": None,
-         "model": None,
-         "weight": 0.5
-     },
-     "model2": {
-         "name": "microsoft/resnet-50",  # generic image-classification model
-         "processor": None,
-         "model": None,
-         "weight": 0.25
-     },
-     "model3": {
-         "name": "google/vit-base-patch16-224",  # Vision Transformer model
-         "processor": None,
-         "model": None,
-         "weight": 0.25
-     }
- }

- # Initialize the models
- for key in models:
-     try:
-         models[key]["processor"] = AutoImageProcessor.from_pretrained(models[key]["name"])
-         models[key]["model"] = AutoModelForImageClassification.from_pretrained(models[key]["name"])
-         print(f"Successfully loaded model: {models[key]['name']}")
-     except Exception as e:
-         print(f"Failed to load model {models[key]['name']}: {str(e)}")
-         models[key]["processor"] = None
-         models[key]["model"] = None

- def process_model_output(model_info, outputs, probabilities):
-     """Normalize the outputs of the different models into a single AI-generation probability"""
-     model_name = model_info["name"].lower()
-
-     # Model-specific handling
-     if "ai-image-detector" in model_name:
-         # Special handling for umm-maybe/AI-image-detector
-         # Inspect the labels
-         ai_label_idx = None
-         human_label_idx = None
-
-         for idx, label in model_info["model"].config.id2label.items():
-             label_lower = label.lower()
-             if "ai" in label_lower or "generated" in label_lower or "fake" in label_lower:
-                 ai_label_idx = idx
-             if "human" in label_lower or "real" in label_lower:
-                 human_label_idx = idx
-
-         # Corrected label-interpretation logic
-         if human_label_idx is not None:
-             # A "human" prediction means the AI probability should be low
-             ai_probability = 1 - float(probabilities[0][human_label_idx].item())
-         elif ai_label_idx is not None:
-             # An "AI" prediction means the AI probability should be high
-             ai_probability = float(probabilities[0][ai_label_idx].item())
-         else:
-             # Fallback
-             ai_probability = 0.5
-
-     elif "resnet" in model_name:
-         # Generic classifier: use a simple heuristic
-         predicted_class_idx = outputs.logits.argmax(-1).item()
-         # Check whether any class is AI-related
-         predicted_class = model_info["model"].config.id2label[predicted_class_idx].lower()
-
-         # Simple heuristic: does the class name contain AI-generation keywords?
-         ai_keywords = ["artificial", "generated", "synthetic", "fake", "computer"]
-         for keyword in ai_keywords:
-             if keyword in predicted_class:
-                 return float(probabilities[0][predicted_class_idx].item())
-
-         # No explicit AI class: return a middling probability
-         return 0.5
-
-     elif "vit" in model_name:
-         # Vision Transformer model
          predicted_class_idx = outputs.logits.argmax(-1).item()
-         # Likewise inspect the class name
          predicted_class = model_info["model"].config.id2label[predicted_class_idx].lower()

-         # Simple heuristic: does the class name contain AI-generation keywords?
          ai_keywords = ["artificial", "generated", "synthetic", "fake", "computer"]
          for keyword in ai_keywords:
              if keyword in predicted_class:
                  return float(probabilities[0][predicted_class_idx].item())

-         # No explicit AI class: return a middling probability
-         return 0.5
-
-     # Default handling
-     predicted_class_idx = outputs.logits.argmax(-1).item()
-     predicted_class = model_info["model"].config.id2label[predicted_class_idx].lower()
-
-     if "ai" in predicted_class or "generated" in predicted_class or "fake" in predicted_class:
-         return float(probabilities[0][predicted_class_idx].item())
-     else:
-         return 1 - float(probabilities[0][predicted_class_idx].item())
-
-     return ai_probability
- def analyze_image_features(image):
-     """Analyze the image's statistical features"""
-     # Convert to OpenCV format
-     img_array = np.array(image)
-     if len(img_array.shape) == 3 and img_array.shape[2] == 3:
-         img_cv = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
-     else:
-         img_cv = img_array
-
-     features = {}
-
-     # Basic features
-     features["width"] = image.width
-     features["height"] = image.height
-     features["aspect_ratio"] = image.width / max(1, image.height)
-
-     # Color analysis
-     if len(img_array.shape) == 3:
-         features["avg_red"] = float(np.mean(img_array[:,:,0]))
-         features["avg_green"] = float(np.mean(img_array[:,:,1]))
-         features["avg_blue"] = float(np.mean(img_array[:,:,2]))
-
-         # Color standard deviation - checks whether the color distribution looks natural
-         features["color_std"] = float(np.std([
-             features["avg_red"],
-             features["avg_green"],
-             features["avg_blue"]
-         ]))
-
-         # Local color variation - real photos usually show more of it
-         local_color_variations = []
-         for i in range(0, img_array.shape[0]-10, 10):
-             for j in range(0, img_array.shape[1]-10, 10):
-                 patch = img_array[i:i+10, j:j+10]
-                 local_color_variations.append(np.std(patch))
-         features["local_color_variation"] = float(np.mean(local_color_variations))
-
-         # Naturalness of the color distribution - more natural in real photos
-         r_hist, _ = np.histogram(img_array[:,:,0], bins=256, range=(0, 256))
-         g_hist, _ = np.histogram(img_array[:,:,1], bins=256, range=(0, 256))
-         b_hist, _ = np.histogram(img_array[:,:,2], bins=256, range=(0, 256))
-
-         # Entropy of the color histograms - usually higher in real photos
-         r_entropy = stats.entropy(r_hist + 1e-10)  # small epsilon avoids log(0)
-         g_entropy = stats.entropy(g_hist + 1e-10)
-         b_entropy = stats.entropy(b_hist + 1e-10)
-         features["color_entropy"] = float((r_entropy + g_entropy + b_entropy) / 3)
-
-     # Edge-consistency analysis
-     edges = cv2.Canny(img_cv, 100, 200)
-     features["edge_density"] = float(np.sum(edges > 0) / (image.width * image.height))
-
-     # Edge-naturalness analysis - edges in real photos are usually more natural
-     if len(img_array.shape) == 3:
-         gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
          sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
          sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
          edge_magnitude = np.sqrt(sobelx**2 + sobely**2)
          features["edge_variance"] = float(np.var(edge_magnitude))

-         # Edge-direction distribution - more natural in real photos
          edge_direction = np.arctan2(sobely, sobelx) * 180 / np.pi
-         edge_dir_hist, _ = np.histogram(edge_direction[edge_magnitude > 30], bins=36, range=(-180, 180))
-         features["edge_direction_entropy"] = float(stats.entropy(edge_dir_hist + 1e-10))
-
-     # Texture analysis using the gray-level co-occurrence matrix
-     if len(img_array.shape) == 3:
-         gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
-
-         # Compute the GLCM
          distances = [5]
-         angles = [0, np.pi/4, np.pi/2, 3*np.pi/4]
-         glcm = graycomatrix(gray, distances=distances, angles=angles, symmetric=True, normed=True)
-
-         # GLCM properties
-         features["texture_contrast"] = float(np.mean(graycoprops(glcm, 'contrast')[0]))
-         features["texture_homogeneity"] = float(np.mean(graycoprops(glcm, 'homogeneity')[0]))
-         features["texture_correlation"] = float(np.mean(graycoprops(glcm, 'correlation')[0]))
-         features["texture_energy"] = float(np.mean(graycoprops(glcm, 'energy')[0]))
-         features["texture_dissimilarity"] = float(np.mean(graycoprops(glcm, 'dissimilarity')[0]))
-         features["texture_ASM"] = float(np.mean(graycoprops(glcm, 'ASM')[0]))

-         # Local binary patterns (LBP) - micro-texture analysis
          try:
-             radius = 3
-             n_points = 8 * radius
-             lbp = local_binary_pattern(gray, n_points, radius, method='uniform')
              lbp_hist, _ = np.histogram(lbp, bins=n_points + 2, range=(0, n_points + 2))
-             lbp_hist = lbp_hist.astype(float) / sum(lbp_hist)
              features["lbp_entropy"] = float(stats.entropy(lbp_hist + 1e-10))
-         except:
-             # Skip this feature if the LBP analysis fails
-             pass
-
-     # Noise analysis
-     if len(img_array.shape) == 3:
-         blurred = cv2.GaussianBlur(img_cv, (5, 5), 0)
-         noise = cv2.absdiff(img_cv, blurred)
-         features["noise_level"] = float(np.mean(noise))
-
-         # Noise distribution - checks whether the noise looks natural
-         features["noise_std"] = float(np.std(noise))
-
-         # Noise-spectrum analysis - real photos have a more natural noise spectrum
-         noise_fft = np.fft.fft2(noise[:,:,0])
-         noise_fft_shift = np.fft.fftshift(noise_fft)
-         noise_magnitude = np.abs(noise_fft_shift)
-         features["noise_spectrum_std"] = float(np.std(noise_magnitude))

-         # Spatial consistency of the noise - usually unnaturally uniform in AI-generated images
-         noise_blocks = []
-         block_size = 32
-         for i in range(0, noise.shape[0]-block_size, block_size):
-             for j in range(0, noise.shape[1]-block_size, block_size):
-                 block = noise[i:i+block_size, j:j+block_size]
-                 noise_blocks.append(np.mean(block))
-         features["noise_spatial_std"] = float(np.std(noise_blocks))
-
-     # Symmetry analysis - AI-generated images tend to be more symmetric
-     if img_cv.shape[1] % 2 == 0:  # the width must be even
-         left_half = img_cv[:, :img_cv.shape[1]//2]
-         right_half = cv2.flip(img_cv[:, img_cv.shape[1]//2:], 1)
-         if left_half.shape == right_half.shape:
-             h_symmetry = 1 - float(np.mean(cv2.absdiff(left_half, right_half)) / 255)
-             features["horizontal_symmetry"] = h_symmetry
-
-     if img_cv.shape[0] % 2 == 0:  # the height must be even
-         top_half = img_cv[:img_cv.shape[0]//2, :]
-         bottom_half = cv2.flip(img_cv[img_cv.shape[0]//2:, :], 0)
-         if top_half.shape == bottom_half.shape:
-             v_symmetry = 1 - float(np.mean(cv2.absdiff(top_half, bottom_half)) / 255)
-             features["vertical_symmetry"] = v_symmetry
-
-     # Frequency-domain analysis - detects unnatural frequency distributions
-     if len(img_array.shape) == 3:
-         gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
-         f_transform = np.fft.fft2(gray)
          f_shift = np.fft.fftshift(f_transform)
          magnitude = np.log(np.abs(f_shift) + 1)

-         # Ratio of high-frequency to low-frequency components
          h, w = magnitude.shape
          center_h, center_w = h // 2, w // 2

-         # Low-frequency region (the center)
          low_freq_region = magnitude[center_h-h//8:center_h+h//8, center_w-w//8:center_w+w//8]
          low_freq_mean = np.mean(low_freq_region)

-         # High-frequency region (the edges)
          high_freq_mean = np.mean(magnitude) - low_freq_mean
-
          features["freq_ratio"] = float(high_freq_mean / max(low_freq_mean, 0.001))

-         # Naturalness of the frequency distribution - more natural in real photos
-         freq_std = np.std(magnitude)
-         features["freq_std"] = float(freq_std)
-
-         # Anisotropy of the frequency distribution - usually more anisotropic in real photos
          freq_blocks = []
-         for angle in range(0, 180, 20):
              mask = np.zeros_like(magnitude)
              cv2.ellipse(mask, (center_w, center_h), (w//2, h//2), angle, -10, 10, 1, -1)
              freq_blocks.append(np.mean(magnitude * mask))
          features["freq_anisotropy"] = float(np.std(freq_blocks))
-
-     # Try to detect faces
-     try:
-         face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
-         gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
-         faces = face_cascade.detectMultiScale(gray, 1.1, 4)
-         features["face_count"] = len(faces)
-
-         if len(faces) > 0:
-             # Analyze facial features
-             face_features = []
-             for (x, y, w, h) in faces:
-                 face = img_cv[y:y+h, x:x+w]
-                 # Skin-texture analysis
-                 face_hsv = cv2.cvtColor(face, cv2.COLOR_BGR2HSV)
-                 skin_mask = cv2.inRange(face_hsv, (0, 20, 70), (20, 150, 255))
-                 skin_pixels = face[skin_mask > 0]
-                 if len(skin_pixels) > 0:
-                     face_features.append({
-                         "skin_std": float(np.std(skin_pixels)),
-                         "skin_local_contrast": float(np.mean(cv2.Laplacian(face, cv2.CV_64F))),
-                         "face_symmetry": analyze_face_symmetry(face)
-                     })
-
-             if face_features:
-                 features["face_skin_std"] = np.mean([f["skin_std"] for f in face_features])
-                 features["face_local_contrast"] = np.mean([f["skin_local_contrast"] for f in face_features])
-                 features["face_symmetry"] = np.mean([f["face_symmetry"] for f in face_features])
-
-             # Facial micro-expression analysis
-             for i, (x, y, w, h) in enumerate(faces):
-                 face = gray[y:y+h, x:x+w]
-                 # Local variation of the facial texture
-                 face_blocks = []
-                 block_size = 8
-                 for bi in range(0, face.shape[0]-block_size, block_size):
-                     for bj in range(0, face.shape[1]-block_size, block_size):
-                         block = face[bi:bi+block_size, bj:bj+block_size]
-                         face_blocks.append(np.std(block))
-                 if face_blocks:
-                     features[f"face_{i}_texture_variation"] = float(np.std(face_blocks))
-     except:
-         # Skip the face features if detection fails
-         pass
-
-     # Physical consistency
-     physical_features = analyze_physical_consistency(img_cv)
-     features.update(physical_features)
-
-     # Detail coherence
-     detail_features = analyze_detail_coherence(img_cv)
-     features.update(detail_features)
-
-     # Clothing details
-     clothing_features = analyze_clothing_details(img_cv)
-     features.update(clothing_features)
-
-     # Hands and joints
-     extremity_features = analyze_extremities(img_cv)
-     features.update(extremity_features)
-
-     return features
- def analyze_face_symmetry(face):
-     """Analyze facial symmetry"""
-     if face.shape[1] % 2 == 0:  # the width must be even
-         left_half = face[:, :face.shape[1]//2]
-         right_half = cv2.flip(face[:, face.shape[1]//2:], 1)
-         if left_half.shape == right_half.shape:
-             return 1 - float(np.mean(cv2.absdiff(left_half, right_half)) / 255)
-     return 0.5  # default value

- def analyze_physical_consistency(image):
-     """Analyze the physical consistency within the image"""
-     features = {}
-
-     try:
-         # Convert to grayscale
-         if len(image.shape) == 3:
-             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-         else:
-             gray = image
-
-         # Lighting-consistency analysis
-         # Detect brightness gradients
-         sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
-         sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
-         gradient_magnitude = np.sqrt(sobelx**2 + sobely**2)
-         gradient_direction = np.arctan2(sobely, sobelx)
-
-         # Consistency of the gradient directions - lighting gradients are more consistent in real photos
-         # Split the image into blocks and find each block's dominant gradient direction
-         block_size = 32
-         gradient_dirs = []
-         for i in range(0, gray.shape[0]-block_size, block_size):
-             for j in range(0, gray.shape[1]-block_size, block_size):
-                 block_gradient = gradient_direction[i:i+block_size, j:j+block_size]
-                 block_magnitude = gradient_magnitude[i:i+block_size, j:j+block_size]
-                 # Only consider pixels with a large gradient magnitude
-                 significant_gradients = block_gradient[block_magnitude > np.mean(block_magnitude)]
-                 if len(significant_gradients) > 0:
-                     # Compute the dominant direction
-                     hist, _ = np.histogram(significant_gradients, bins=8, range=(-np.pi, np.pi))
-                     main_dir = np.argmax(hist)
-                     gradient_dirs.append(main_dir)
-
-         if gradient_dirs:
-             # Consistency of the dominant directions
-             hist, _ = np.histogram(gradient_dirs, bins=8, range=(0, 8))
-             features["light_direction_consistency"] = float(np.max(hist) / max(sum(hist), 1))
-
-         # Perspective-consistency analysis
-         # Detect straight lines with the Hough transform
-         edges = cv2.Canny(gray, 50, 150)
-         lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=50, minLineLength=50, maxLineGap=10)
-
-         if lines is not None and len(lines) > 1:
-             # Analyze the vanishing points
-             vanishing_points = []
-             for i in range(len(lines)):
-                 for j in range(i+1, len(lines)):
-                     x1, y1, x2, y2 = lines[i][0]
-                     x3, y3, x4, y4 = lines[j][0]
-
-                     # Intersection of the two lines (a candidate vanishing point)
-                     d = (x1-x2)*(y3-y4) - (y1-y2)*(x3-x4)
-                     if abs(d) > 0.001:  # skip parallel lines
-                         px = ((x1*y2 - y1*x2)*(x3-x4) - (x1-x2)*(x3*y4 - y3*x4)) / d
-                         py = ((x1*y2 - y1*x2)*(y3-y4) - (y1-y2)*(x3*y4 - y3*x4)) / d
-
-                         # Only keep intersections inside or near the image
-                         img_diag = np.sqrt(gray.shape[0]**2 + gray.shape[1]**2)
-                         if -img_diag < px < 2*gray.shape[1] and -img_diag < py < 2*gray.shape[0]:
-                             vanishing_points.append((px, py))
-
-             if vanishing_points:
-                 # How tightly clustered the vanishing points are - usually tighter in real photos
-                 vp_x = [p[0] for p in vanishing_points]
-                 vp_y = [p[1] for p in vanishing_points]
-                 features["perspective_consistency"] = float(1 / (1 + np.std(vp_x) + np.std(vp_y)))
-     except:
-         # Skip the physical-consistency features if the analysis fails
-         pass
-
-     return features
-
- def analyze_detail_coherence(image):
-     """Analyze the coherence of the image's details"""
-     features = {}
-
-     try:
-         # Convert to grayscale
-         if len(image.shape) == 3:
-             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-         else:
-             gray = image
-
-         # Multi-scale detail analysis
-         scales = [3, 5, 9]
-         detail_levels = []
-
-         for scale in scales:
-             # Extract detail with Laplacians at different scales
-             laplacian = cv2.Laplacian(gray, cv2.CV_64F, ksize=scale)
-             abs_laplacian = np.abs(laplacian)
-             detail_levels.append(np.mean(abs_laplacian))
-
-         # Consistency of detail across scales - varies more naturally in real photos
-         if len(detail_levels) > 1:
-             features["detail_scale_consistency"] = float(np.std(detail_levels) / max(np.mean(detail_levels), 0.001))
-
-         # Detail consistency across image regions
-         block_size = 64
-         detail_blocks = []
-
-         for i in range(0, gray.shape[0]-block_size, block_size):
-             for j in range(0, gray.shape[1]-block_size, block_size):
-                 block = gray[i:i+block_size, j:j+block_size]
-                 # Detail level of the block
-                 block_laplacian = cv2.Laplacian(block, cv2.CV_64F)
-                 detail_blocks.append(np.mean(np.abs(block_laplacian)))
-
-         if detail_blocks:
-             # Uniformity of the detail distribution - often unnatural in AI-generated images
-             features["detail_spatial_std"] = float(np.std(detail_blocks))
-             features["detail_spatial_entropy"] = float(stats.entropy(detail_blocks + 1e-10))
-
-         # Edge-transition analysis
-         edges = cv2.Canny(gray, 50, 150)
-         dilated = cv2.dilate(edges, np.ones((3,3), np.uint8))
-         edge_transition = cv2.absdiff(dilated, edges)
-
-         # Properties of the edge-transition zone - transitions are more natural in real photos
-         if np.sum(edge_transition) > 0:
-             transition_values = gray[edge_transition > 0]
-             if len(transition_values) > 0:
-                 features["edge_transition_std"] = float(np.std(transition_values))
-     except:
-         # Skip the detail-coherence features if the analysis fails
-         pass
-
-     return features
-
- def analyze_clothing_details(image):
-     """Analyze how natural the clothing details look"""
-     features = {}
-
-     try:
-         # Convert to grayscale
-         if len(image.shape) == 3:
-             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-         else:
-             gray = image
-
-         # Canny edge detection
-         edges = cv2.Canny(gray, 50, 150)
-
-         # Detect straight lines with the Hough transform
-         lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=50, minLineLength=50, maxLineGap=10)
-
-         if lines is not None:
-             # Angle distribution of the lines
-             angles = []
-             for line in lines:
-                 x1, y1, x2, y2 = line[0]
-                 if x2 - x1 != 0:  # avoid division by zero
-                     angle = np.arctan((y2 - y1) / (x2 - x1)) * 180 / np.pi
-                     angles.append(angle)
-
-             if angles:
-                 # Standard deviation of the angles - AI-generated clothing folds often have an unnatural angle distribution
-                 features["clothing_angle_std"] = float(np.std(angles))
-
-                 # Angle histogram - too many similar angles is an AI-generation artifact
-                 hist, _ = np.histogram(angles, bins=18, range=(-90, 90))
-                 max_count = np.max(hist)
-                 total_count = np.sum(hist)
-                 features["clothing_angle_uniformity"] = float(max_count / max(total_count, 1))
-
-         # Texture-consistency analysis
-         # Split the image into small blocks and compute per-block texture features
-         block_size = 32
-         h, w = gray.shape
-         texture_variations = []
-
-         for i in range(0, h-block_size, block_size):
-             for j in range(0, w-block_size, block_size):
-                 block = gray[i:i+block_size, j:j+block_size]
-                 # Local LBP feature or simple variance
-                 texture_variations.append(np.var(block))
-
-         if texture_variations:
-             # Standard deviation of the texture variation - real fabric varies more naturally
-             features["clothing_texture_std"] = float(np.std(texture_variations))
-
-             # Mean texture variation - AI-generated fabric usually varies less
-             features["clothing_texture_mean"] = float(np.mean(texture_variations))
-
-             # Entropy of the texture variation - higher for real fabric
-             features["clothing_texture_entropy"] = float(stats.entropy(texture_variations + 1e-10))
-
-         # Fold analysis - extract likely folds with morphological operations
-         _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
-         kernel = np.ones((3,3), np.uint8)
-         opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
-
-         # Find the contours
-         contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-         # Analyze the contour complexity
-         if contours:
-             contour_complexities = []
-             for contour in contours:
-                 area = cv2.contourArea(contour)
-                 if area > 100:  # ignore tiny contours
-                     perimeter = cv2.arcLength(contour, True)
-                     complexity = perimeter / max(np.sqrt(area), 1)
-                     contour_complexities.append(complexity)
-
-             if contour_complexities:
-                 features["clothing_contour_complexity"] = float(np.mean(contour_complexities))
-                 features["clothing_contour_std"] = float(np.std(contour_complexities))
-     except:
-         # Skip the clothing features if the analysis fails
-         pass
-
-     return features
-
- def analyze_extremities(image):
-     """Analyze extremity details such as fingers and toes"""
-     features = {}
-
-     try:
-         # Convert to grayscale
-         if len(image.shape) == 3:
-             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-         else:
-             gray = image
-
-         # Extract likely hand regions with morphological operations
-         _, thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)
-         kernel = np.ones((5,5), np.uint8)
-         opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
-
-         # Find the contours
-         contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-         # Analyze the contour complexity
-         if contours:
-             # Perimeter-to-area ratio - details such as fingers raise this ratio
-             perimeter_area_ratios = []
-             for contour in contours:
-                 area = cv2.contourArea(contour)
-                 if area > 100:  # ignore tiny contours
-                     perimeter = cv2.arcLength(contour, True)
-                     ratio = perimeter / max(area, 1)
-                     perimeter_area_ratios.append(ratio)
-
-             if perimeter_area_ratios:
-                 features["extremity_perimeter_area_ratio"] = float(np.mean(perimeter_area_ratios))
-
-             # Convexity defects - the gaps between fingers create them
-             defect_depths = []
-             for contour in contours:
-                 if len(contour) > 5:  # need enough points for a convex hull
-                     hull = cv2.convexHull(contour, returnPoints=False)
-                     if len(hull) > 3:  # need at least 4 points for convexity defects
-                         try:
-                             defects = cv2.convexityDefects(contour, hull)
-                             if defects is not None:
-                                 for i in range(defects.shape[0]):
-                                     _, _, _, depth = defects[i, 0]
-                                     defect_depths.append(depth)
-                         except:
-                             pass
-
-             if defect_depths:
-                 features["extremity_defect_depth_mean"] = float(np.mean(defect_depths))
-                 features["extremity_defect_depth_std"] = float(np.std(defect_depths))
-
-                 # Distribution of the defects - more natural for real fingers
-                 defect_hist, _ = np.histogram(defect_depths, bins=10)
-                 features["extremity_defect_entropy"] = float(stats.entropy(defect_hist + 1e-10))
-
-         # Curvature variation of the contours - more natural for real fingers
-         curvature_variations = []
-         for contour in contours:
-             if len(contour) > 20:  # need enough points to estimate curvature
-                 # Simplify the contour to reduce noise
-                 epsilon = 0.01 * cv2.arcLength(contour, True)
-                 approx = cv2.approxPolyDP(contour, epsilon, True)
-
-                 # Angle changes between neighboring points
-                 angles = []
-                 for i in range(len(approx)):
-                     p1 = approx[i][0]
-                     p2 = approx[(i+1) % len(approx)][0]
-                     p3 = approx[(i+2) % len(approx)][0]
-
-                     # Angle between the two vectors
-                     v1 = p2 - p1
-                     v2 = p3 - p2
-
-                     if np.linalg.norm(v1) > 0 and np.linalg.norm(v2) > 0:
-                         cos_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
-                         cos_angle = np.clip(cos_angle, -1.0, 1.0)  # keep within the valid range
-                         angle = np.arccos(cos_angle)
-                         angles.append(angle)
-
-                 if angles:
-                     curvature_variations.append(np.std(angles))
-
-         if curvature_variations:
-             features["extremity_curvature_std"] = float(np.mean(curvature_variations))
-     except:
-         # Skip the extremity features if the analysis fails
-         pass
-
-     return features
  def check_ai_specific_features(image_features):
-     """Check for typical signs of AI-generated images, with improved coverage of the newest AI models"""
      ai_score = 0
      ai_signs = []

-     # Heavier weighting for the micro-texture analysis
-     if "lbp_entropy" in image_features:
-         if image_features["lbp_entropy"] < 2.0:
-             ai_score += 0.4  # increased weight
-             ai_signs.append("Extremely low micro-texture entropy, a typical sign of AI generation")
-         elif image_features["lbp_entropy"] < 3.0:
-             ai_score += 0.3
-             ai_signs.append("Abnormally low micro-texture entropy")
-
-     # Heavier weighting for the frequency-anisotropy analysis
-     if "freq_anisotropy" in image_features:
-         if image_features["freq_anisotropy"] < 0.05:
-             ai_score += 0.4  # increased weight
-             ai_signs.append("Extremely low frequency anisotropy, a typical sign of AI generation")
-         elif image_features["freq_anisotropy"] < 0.5:
-             ai_score += 0.3
-             ai_signs.append("Abnormally low frequency anisotropy")
-
-     # Detail-consistency analysis
-     if "detail_scale_consistency" in image_features and "detail_spatial_std" in image_features:
-         if image_features["detail_scale_consistency"] < 0.2 and image_features["detail_spatial_std"] < 5:
-             ai_score += 0.3
-             ai_signs.append("Anomalous detail consistency, a typical sign of AI generation")
-
-     # Symmetry check - AI-generated images are usually highly symmetric
-     if "horizontal_symmetry" in image_features and "vertical_symmetry" in image_features:
-         avg_symmetry = (image_features["horizontal_symmetry"] + image_features["vertical_symmetry"]) / 2
-         if avg_symmetry > 0.8:
-             ai_score += 0.15
-             ai_signs.append("Abnormally high image symmetry")
-         elif avg_symmetry > 0.7:
-             ai_score += 0.1
-             ai_signs.append("Fairly high image symmetry")
-
-     # Texture correlation - usually high in AI-generated images
-     if "texture_correlation" in image_features:
-         if image_features["texture_correlation"] > 0.95:  # raised threshold
-             ai_score += 0.15
-             ai_signs.append("Abnormally high texture correlation")
-         elif image_features["texture_correlation"] > 0.9:
-             ai_score += 0.1
-             ai_signs.append("Fairly high texture correlation")
-
-     # Edge/noise relationship - AI images usually have clean edges but unnatural noise
-     if "edge_density" in image_features and "noise_level" in image_features:
-         edge_noise_ratio = image_features["edge_density"] / max(image_features["noise_level"], 0.001)
-         if edge_noise_ratio < 0.01:
-             ai_score += 0.15
-             ai_signs.append("Unnatural edge and noise distribution")
-
-     # Color smoothness - color transitions are usually smoother in AI images
-     if "color_std" in image_features and image_features["color_std"] < 10:
-         ai_score += 0.1
-         ai_signs.append("Abnormally smooth color transitions")
-
-     # Color entropy - usually low in AI images
-     if "color_entropy" in image_features and image_features["color_entropy"] < 5:
-         ai_score += 0.15
-         ai_signs.append("Abnormally low color-distribution entropy")
-
-     # Texture energy - its distribution is usually unnatural in AI images
-     if "texture_energy" in image_features and image_features["texture_energy"] < 0.01:
-         ai_score += 0.15
-         ai_signs.append("Unnatural texture-energy distribution")
-
-     # Frequency ratio - the frequency distribution is usually unnatural in AI images
-     if "freq_ratio" in image_features:
-         if image_features["freq_ratio"] < 0.1 or image_features["freq_ratio"] > 2.0:
-             ai_score += 0.1
-             ai_signs.append("Unnatural frequency distribution")
-
-     # Noise spectrum - more natural in real photos
-     if "noise_spectrum_std" in image_features and image_features["noise_spectrum_std"] < 1000:
-         ai_score += 0.15
-         ai_signs.append("Abnormally regular noise spectrum")
-
-     # Spatial noise distribution - more natural in real photos
-     if "noise_spatial_std" in image_features and image_features["noise_spatial_std"] < 0.5:
-         ai_score += 0.15
-         ai_signs.append("Abnormally uniform spatial noise distribution")
-
-     # Detail-scale consistency - usually unnaturally even in AI images
-     if "detail_scale_consistency" in image_features and image_features["detail_scale_consistency"] < 0.2:
-         ai_score += 0.15
-         ai_signs.append("Abnormally uniform detail variation across scales")
-
-     # Spatial detail distribution - usually unnaturally even in AI images
-     if "detail_spatial_std" in image_features and image_features["detail_spatial_std"] < 5:
-         ai_score += 0.15
-         ai_signs.append("Abnormally uniform spatial detail distribution")
-
-     # Spatial detail entropy - usually low in AI images
-     if "detail_spatial_entropy" in image_features and image_features["detail_spatial_entropy"] < 1.5:
-         ai_score += 0.15
-         ai_signs.append("Abnormally low spatial detail entropy")
-
-     # Edge transitions - usually unnatural in AI images
-     if "edge_transition_std" in image_features and image_features["edge_transition_std"] < 10:
-         ai_score += 0.15
-         ai_signs.append("Abnormally uniform edge transitions")
-
-     # Lighting consistency - usually too high in AI images
-     if "light_direction_consistency" in image_features and image_features["light_direction_consistency"] > 0.7:
-         ai_score += 0.15
-         ai_signs.append("Abnormally consistent lighting direction")
-
-     # Perspective consistency - usually too high in AI images
-     if "perspective_consistency" in image_features and image_features["perspective_consistency"] > 0.7:
-         ai_score += 0.15
-         ai_signs.append("Abnormally high perspective consistency")
-
-     # Face features - AI-generated faces have characteristic traits
-     if "face_symmetry" in image_features and image_features["face_symmetry"] > 0.8:
-         ai_score += 0.15
-         ai_signs.append("Abnormally high facial symmetry")
-
-     if "face_skin_std" in image_features and image_features["face_skin_std"] < 10:
-         ai_score += 0.2
-         ai_signs.append("Abnormally uniform skin texture")
-
-     # Facial texture variation - usually unnaturally uniform in AI-generated faces
-     face_texture_keys = [k for k in image_features.keys() if k.startswith("face_") and k.endswith("_texture_variation")]
-     if face_texture_keys:
-         face_texture_variations = [image_features[k] for k in face_texture_keys]
-         if np.mean(face_texture_variations) < 5:
-             ai_score += 0.2
-             ai_signs.append("Abnormally uniform facial texture variation")
-
-     # Clothing features - AI-generated clothing has characteristic problems
-     if "clothing_angle_uniformity" in image_features and image_features["clothing_angle_uniformity"] > 0.3:
-         ai_score += 0.2
-         ai_signs.append("Unnatural angle distribution in clothing folds")
-
-     if "clothing_texture_std" in image_features and image_features["clothing_texture_std"] < 100:
-         ai_score += 0.15
-         ai_signs.append("Abnormally uniform clothing-texture variation")
-
-     if "clothing_texture_entropy" in image_features and image_features["clothing_texture_entropy"] < 1.5:
-         ai_score += 0.15
-         ai_signs.append("Abnormally low clothing-texture entropy")
-
-     if "clothing_contour_complexity" in image_features and image_features["clothing_contour_complexity"] < 5:
-         ai_score += 0.15
-         ai_signs.append("Abnormally low clothing-contour complexity")
-
-     # Hand features - AI-generated hands have characteristic problems
-     if "extremity_perimeter_area_ratio" in image_features:
-         if image_features["extremity_perimeter_area_ratio"] < 0.05:
-             ai_score += 0.2
-             ai_signs.append("Abnormally smooth hand/extremity contours")
-
-     if "extremity_defect_depth_std" in image_features and image_features["extremity_defect_depth_std"] < 10:
-         ai_score += 0.15
-         ai_signs.append("Abnormally uniform finger gaps")
-
-     if "extremity_defect_entropy" in image_features and image_features["extremity_defect_entropy"] < 1.0:
-         ai_score += 0.15
-         ai_signs.append("Abnormally low finger-gap entropy")
-
-     if "extremity_curvature_std" in image_features and image_features["extremity_curvature_std"] < 0.2:
-         ai_score += 0.15
-         ai_signs.append("Abnormally uniform hand-curvature variation")
-
-     # Pay special attention to feature combinations typical of the latest AI models
-     # Several co-occurring features are strong evidence of AI generation
-     ai_feature_count = len(ai_signs)
-     if ai_feature_count >= 5:  # many AI features detected
-         ai_score = max(ai_score, 0.9)  # force a high AI score
-     elif ai_feature_count >= 3:
          ai_score = max(ai_score, 0.7)

      return min(ai_score, 1.0), ai_signs
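(The scheme above is purely additive with an escalating floor: each suspicious feature contributes a fixed weight, several co-occurring signs raise the minimum, and the total is clamped to 1.0. A toy run of the same pattern, with hypothetical feature values that are not part of the commit, is sketched below.)

    features = {"lbp_entropy": 1.8, "freq_anisotropy": 0.03, "color_entropy": 4.2}
    score, signs = 0.0, []
    if features.get("lbp_entropy", 99) < 2.0:
        score += 0.4; signs.append("very low micro-texture entropy")
    if features.get("freq_anisotropy", 99) < 0.05:
        score += 0.4; signs.append("very low frequency anisotropy")
    if features.get("color_entropy", 99) < 5:
        score += 0.15; signs.append("low color entropy")
    if len(signs) >= 3:
        score = max(score, 0.7)  # several co-occurring signs raise the floor
    print(min(score, 1.0), signs)  # 0.95 with three signs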

  def detect_beauty_filter_signs(image_features):
-     """Detect traces of beauty filters"""
      beauty_score = 0
      beauty_signs = []

-     # Skin texture
      if "face_skin_std" in image_features:
          if image_features["face_skin_std"] < 15:
              beauty_score += 0.3
-             beauty_signs.append("Overly uniform skin texture, typical of beauty filters")
-         elif image_features["face_skin_std"] < 25:
-             beauty_score += 0.2
-             beauty_signs.append("Fairly uniform skin texture; a beauty filter may have been used")

-     # Local contrast - beauty filters usually reduce it
-     if "face_local_contrast" in image_features:
-         if image_features["face_local_contrast"] < 5:
-             beauty_score += 0.2
-             beauty_signs.append("Low local facial contrast, typical of beauty filters")
-
-     # Edge smoothness - beauty filters usually smooth the edges
      if "edge_density" in image_features:
          if image_features["edge_density"] < 0.03:
              beauty_score += 0.2
-             beauty_signs.append("Overly smooth edges, typical of beauty filters")
-         elif image_features["edge_density"] < 0.05:
-             beauty_score += 0.1
-             beauty_signs.append("Fairly smooth edges; a beauty filter may have been used")

-     # Noise - beauty filters usually reduce it
      if "noise_level" in image_features:
          if image_features["noise_level"] < 1.0:
              beauty_score += 0.2
-             beauty_signs.append("Abnormally little noise, typical of beauty filters")
-         elif image_features["noise_level"] < 2.0:
-             beauty_score += 0.1
-             beauty_signs.append("Little noise; a beauty filter may have been used")
-
-     # Facial symmetry - beauty filters usually increase it
-     if "face_symmetry" in image_features:
-         if image_features["face_symmetry"] > 0.8:
-             beauty_score += 0.2
-             beauty_signs.append("Abnormally high facial symmetry, typical of beauty filters")
-         elif image_features["face_symmetry"] > 0.7:
-             beauty_score += 0.1
-             beauty_signs.append("Fairly high facial symmetry; a beauty filter may have been used")
-
-     # Facial texture variation - beauty filters make the facial texture more uniform
-     face_texture_keys = [k for k in image_features.keys() if k.startswith("face_") and k.endswith("_texture_variation")]
-     if face_texture_keys:
-         face_texture_variations = [image_features[k] for k in face_texture_keys]
-         if np.mean(face_texture_variations) < 10:
-             beauty_score += 0.2
-             beauty_signs.append("Abnormally uniform facial texture variation, typical of beauty filters")
-
-     # Edge transitions - beauty filters usually smooth them
-     if "edge_transition_std" in image_features and image_features["edge_transition_std"] < 15:
-         beauty_score += 0.2
-         beauty_signs.append("Abnormally smooth edge transitions, typical of beauty filters")

      return min(beauty_score, 1.0), beauty_signs

  def detect_photoshop_signs(image_features):
-     """Detect Photoshop-style retouching in the image"""
      ps_score = 0
      ps_signs = []

-     # Skin texture
      if "texture_homogeneity" in image_features:
          if image_features["texture_homogeneity"] > 0.4:
              ps_score += 0.2
              ps_signs.append("Overly uniform skin texture")
-         elif image_features["texture_homogeneity"] > 0.3:
-             ps_score += 0.1
-             ps_signs.append("Fairly uniform skin texture")

-     # Unnatural edges
      if "edge_density" in image_features:
          if image_features["edge_density"] < 0.01:
              ps_score += 0.2
              ps_signs.append("Overly smooth edges")
-         elif image_features["edge_density"] < 0.03:
-             ps_score += 0.1
-             ps_signs.append("Fairly smooth edges")

-     # Unnatural colors
      if "color_std" in image_features:
          if image_features["color_std"] > 50:
              ps_score += 0.2
              ps_signs.append("Highly unnatural color distribution")
-         elif image_features["color_std"] > 30:
-             ps_score += 0.1
-             ps_signs.append("Slightly unnatural color distribution")
-
-     # Inconsistent noise
-     if "noise_level" in image_features and "noise_std" in image_features:
-         noise_ratio = image_features["noise_std"] / max(image_features["noise_level"], 0.001)
-         if noise_ratio < 0.5:
-             ps_score += 0.2
-             ps_signs.append("Unnatural noise distribution")
-         elif noise_ratio < 0.7:
-             ps_score += 0.1
-             ps_signs.append("Slightly unnatural noise distribution")
-
-     # Unnatural frequency distribution
-     if "freq_ratio" in image_features:
-         if image_features["freq_ratio"] < 0.2:
-             ps_score += 0.2
-             ps_signs.append("Unnatural frequency distribution; possibly over-blurred")
-         elif image_features["freq_ratio"] > 2.0:
-             ps_score += 0.2
-             ps_signs.append("Unnatural frequency distribution; possibly over-sharpened")
-
-     # Inconsistent detail
-     if "detail_spatial_std" in image_features and image_features["detail_spatial_std"] > 50:
-         ps_score += 0.2
-         ps_signs.append("Inconsistent detail distribution; possible local retouching")
-
-     # Unnatural edge transitions
-     if "edge_transition_std" in image_features:
-         if image_features["edge_transition_std"] > 50:
-             ps_score += 0.2
-             ps_signs.append("Unnatural edge transitions; possible selection-based retouching")
-         elif image_features["edge_transition_std"] < 5:
-             ps_score += 0.2
-             ps_signs.append("Overly smooth edge transitions; possible heavy retouching")

      return min(ps_score, 1.0), ps_signs
963
- # 在这里添加get_detailed_analysis函数
964
- def get_detailed_analysis(ai_probability, ps_score, beauty_score, ps_signs, ai_signs, beauty_signs, valid_models_count, ai_feature_score, image_features=None):
965
- """提供更详细的分析结果,使用二级分类框架,优先考虑AI特征分析"""
966
 
967
- # 根据有效模型数量调整置信度描述
968
  confidence_prefix = ""
969
  if valid_models_count >= 3:
970
  confidence_prefix = "极高置信度:"
@@ -973,53 +599,23 @@ def get_detailed_analysis(ai_probability, ps_score, beauty_score, ps_signs, ai_signs, beauty_signs, valid_models_count, ai_feature_score, image_features=None):
      elif valid_models_count == 1:
          confidence_prefix = "Moderate confidence: "

-     # Handle severe disagreement between the feature analysis and the model predictions
      if ai_feature_score > 0.8 and ai_probability < 0.6:
-         ai_probability = max(0.8, ai_probability)  # a very high AI-feature score overrides the models
          category = confidence_prefix + "AI-generated image (based on feature analysis)"
          description = "Based on several typical AI features, the image is very likely AI-generated, even though the model predictions are inconclusive."
          main_category = "AI-generated"
      elif ai_feature_score > 0.6 and ai_probability < 0.5:
-         ai_probability = max(0.7, ai_probability)  # a high AI-feature score raises the AI probability

-         # Hard override on specific key features
-         if image_features is not None:
-             if "lbp_entropy" in image_features and image_features["lbp_entropy"] < 2.0:
-                 if "freq_anisotropy" in image_features and image_features["freq_anisotropy"] < 0.05:
-                     # Extremely low micro-texture entropy together with extremely low frequency anisotropy is near-certain AI generation
-                     ai_probability = 0.95
-                     category = confidence_prefix + "AI-generated image (definite)"
-                     description = "Multiple decisive AI-generation features were detected; the image is almost certainly AI-generated."
-                     main_category = "AI-generated"
-
-         # Describe the detected retouching traces
-         if ps_signs:
-             ps_details = "Detected retouching traces: " + ", ".join(ps_signs)
-         else:
-             ps_details = "No obvious retouching traces detected."
-
-         # Describe the detected AI features
-         if ai_signs:
-             ai_details = "Detected AI features: " + ", ".join(ai_signs)
-         else:
-             ai_details = "No obvious AI-generation features detected."
-
-         # Describe the detected beauty-filter features
-         if beauty_signs:
-             beauty_details = "Detected beauty-filter features: " + ", ".join(beauty_signs)
-         else:
-             beauty_details = "No obvious beauty-filter features detected."
-
-         return category, description, ps_details, ai_details, beauty_details, main_category
-
-     # Level 1: AI-generated vs. real photo
-     if ai_probability > 0.6:  # lowered AI threshold to improve recall
          category = confidence_prefix + "AI-generated image"
          description = "The image was most likely generated entirely by AI, with almost no characteristics of a real photo."
          main_category = "AI-generated"
      else:
-         # Level 2: unretouched real photo vs. obvious retouching
-         combined_edit_score = max(ps_score, beauty_score)  # the higher of the PS and beauty scores

          if combined_edit_score > 0.5:
              category = confidence_prefix + "Real photo with obvious retouching"
@@ -1030,148 +626,111 @@ def get_detailed_analysis(ai_probability, ps_score, beauty_score, ps_signs, ai_signs, beauty_signs, valid_models_count, ai_feature_score, image_features=None):
              description = "The image is most likely a real photo without heavy post-processing, retaining natural detail and characteristics."
              main_category = "Real photo - unretouched"

-     # Boundary case: both the AI probability and the edit score are high
      if ai_probability > 0.45 and combined_edit_score > 0.7:
-         # Could be a heavily retouched real photo, or an AI-generated image
          category = confidence_prefix + "Real photo with obvious retouching (possibly AI-generated)"
          description = "The image may be a heavily post-processed real photo, or an AI-generated image. Modern AI output and heavy retouching look similar and are hard to fully distinguish."
          main_category = "Real photo - heavily retouched"

-     # Describe the detected retouching traces
-     if ps_signs:
-         ps_details = "Detected retouching traces: " + ", ".join(ps_signs)
-     else:
-         ps_details = "No obvious retouching traces detected."
-
-     # Describe the detected AI features
-     if ai_signs:
-         ai_details = "Detected AI features: " + ", ".join(ai_signs)
-     else:
-         ai_details = "No obvious AI-generation features detected."
-
-     # Describe the detected beauty-filter features
-     if beauty_signs:
-         beauty_details = "Detected beauty-filter features: " + ", ".join(beauty_signs)
-     else:
-         beauty_details = "No obvious beauty-filter features detected."

      return category, description, ps_details, ai_details, beauty_details, main_category

  def detect_ai_image(image):
-     """Main detection function"""
      if image is None:
          return {"error": "No image provided"}

      results = {}
      valid_models = 0
      weighted_ai_probability = 0

-     # Run prediction with each model
-     for key, model_info in models.items():
-         if model_info["processor"] is not None and model_info["model"] is not None:
-             try:
-                 # Preprocess the image
-                 inputs = model_info["processor"](images=image, return_tensors="pt")
-                 with torch.no_grad():
-                     outputs = model_info["model"](**inputs)
-
-                 # Get the probabilities
-                 probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
-
-                 # Use the adapter to handle the different model outputs
-                 ai_probability = process_model_output(model_info, outputs, probabilities)
-
-                 # Add to the results
-                 predicted_class_idx = outputs.logits.argmax(-1).item()
-                 results[key] = {
-                     "model_name": model_info["name"],
-                     "ai_probability": ai_probability,
-                     "predicted_class": model_info["model"].config.id2label[predicted_class_idx]
-                 }
-
-                 # Accumulate the weighted probability
-                 weighted_ai_probability += ai_probability * model_info["weight"]
-                 valid_models += 1
-
-             except Exception as e:
-                 results[key] = {
-                     "model_name": model_info["name"],
-                     "error": str(e)
-                 }

      # Compute the final weighted probability
      if valid_models > 0:
-         final_ai_probability = weighted_ai_probability / sum(m["weight"] for k, m in models.items() if m["processor"] is not None and m["model"] is not None)
      else:
          return {"error": "All models failed to load"}

-     # Analyze the image features
-     image_features = analyze_image_features(image)

-     # Check the AI-specific features
-     ai_feature_score, ai_signs = check_ai_specific_features(image_features)

-     # Analyze the retouching traces
      ps_score, ps_signs = detect_photoshop_signs(image_features)
-
-     # Analyze the beauty-filter traces
      beauty_score, beauty_signs = detect_beauty_filter_signs(image_features)

-     # Adjust the AI probability using the feature weights
      adjusted_probability = final_ai_probability

-     # Give the AI-feature score more weight
-     if ai_feature_score > 0.8:  # the AI features are very pronounced
-         adjusted_probability = max(adjusted_probability, 0.8)  # raise the AI probability substantially
      elif ai_feature_score > 0.6:
          adjusted_probability = max(adjusted_probability, 0.7)
      elif ai_feature_score > 0.4:
          adjusted_probability = max(adjusted_probability, 0.6)

-     # Pay special attention to the key AI features
      key_ai_features_count = 0

-     # Micro-texture entropy - a strong indicator of AI generation
      if "lbp_entropy" in image_features and image_features["lbp_entropy"] < 2.5:
          key_ai_features_count += 1
          adjusted_probability += 0.1

-     # Frequency anisotropy - a strong indicator of AI generation
      if "freq_anisotropy" in image_features and image_features["freq_anisotropy"] < 0.1:
          key_ai_features_count += 1
          adjusted_probability += 0.1

-     # Spatial detail distribution - a strong indicator of AI generation
      if "detail_spatial_std" in image_features and image_features["detail_spatial_std"] < 5:
          key_ai_features_count += 1
          adjusted_probability += 0.1

-     # Several key AI features together are strong evidence of AI generation
      if key_ai_features_count >= 2:
          adjusted_probability = max(adjusted_probability, 0.7)

-     # Reduce the influence of beauty-filter features on the AI decision
-     # Even with a high beauty score, pronounced AI features still mean AI-generated
-     if beauty_score > 0.6 and ai_feature_score > 0.7:
-         # Keep the AI probability high instead of lowering it
-         pass
-
-     # Clothing or hand anomalies sharply raise the AI probability
-     if "clothing_angle_uniformity" in image_features and image_features["clothing_angle_uniformity"] > 0.3:
-         adjusted_probability = max(adjusted_probability, 0.7)
-
-     if "extremity_perimeter_area_ratio" in image_features and image_features["extremity_perimeter_area_ratio"] < 0.05:
-         adjusted_probability = max(adjusted_probability, 0.7)
-
-     # Clamp the probability to [0, 1]
      adjusted_probability = min(1.0, max(0.0, adjusted_probability))

-     # Get the detailed analysis
      category, description, ps_details, ai_details, beauty_details, main_category = get_detailed_analysis(
-         adjusted_probability, ps_score, beauty_score, ps_signs, ai_signs, beauty_signs, valid_models, ai_feature_score, image_features
      )

      # Build the final result
      final_result = {
          "ai_probability": adjusted_probability,
          "original_ai_probability": final_ai_probability,
@@ -1184,11 +743,13 @@ def detect_ai_image(image):
          "ps_details": ps_details,
          "ai_details": ai_details,
          "beauty_details": beauty_details,

          "individual_model_results": results,
-         "features": image_features

      }

-     # Return two values: the JSON result and the Label data
      label_data = {main_category: 1.0}
      return final_result, label_data

@@ -1200,10 +761,12 @@ iface = gr.Interface(
          gr.JSON(label="Detailed analysis results"),
          gr.Label(label="Main category", num_top_classes=1)
      ],
-     title="Enhanced AI Image Detection API",
      description="Multi-model ensemble that classifies an image as AI-generated or a real photo (unretouched / retouched)",
      examples=None,
      allow_flagging="never"
  )

- iface.launch()
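(Both the removed and the rewritten versions below combine per-model outputs the same way: a weighted average over whichever detectors actually loaded, renormalized by the sum of their weights, as in the `final_ai_probability` line above. A minimal, self-contained sketch of that step, with hypothetical inputs that are not part of the commit:)

    def combine_ai_probabilities(model_outputs):
        """model_outputs: list of (ai_probability, weight) pairs for the models that loaded."""
        total_weight = sum(weight for _, weight in model_outputs)
        if total_weight == 0:
            raise ValueError("no model produced a prediction")
        return sum(p * weight for p, weight in model_outputs) / total_weight

    # Example: model1 (weight 0.5) says 0.9, model3 (weight 0.25) says 0.4; model2 failed to load.
    print(combine_ai_probabilities([(0.9, 0.5), (0.4, 0.25)]))  # (0.45 + 0.10) / 0.75 ≈ 0.733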
  import gradio as gr
  import torch
  import numpy as np
  import cv2
+ from PIL import Image
+ import time
+ import os
+ from scipy import stats
  from skimage.feature import graycomatrix, graycoprops, local_binary_pattern
+ from transformers import AutoImageProcessor, AutoModelForImageClassification
+ from functools import lru_cache

+ # Set up the cache directories
+ os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
+ os.environ["HF_HOME"] = "/tmp/hf_home"
+ os.makedirs("/tmp/transformers_cache", exist_ok=True)
+ os.makedirs("/tmp/hf_home", exist_ok=True)

+ #############################################
+ # Model management (formerly optimized_model_loading.py)
+ #############################################

+ class ModelManager:
+     """Manages model loading, caching and inference"""
+
+     def __init__(self):
+         self.models = {
+             "model1": {
+                 "name": "umm-maybe/AI-image-detector",
+                 "processor": None,
+                 "model": None,
+                 "weight": 0.5
+             },
+             "model2": {
+                 "name": "microsoft/resnet-50",
+                 "processor": None,
+                 "model": None,
+                 "weight": 0.25
+             },
+             "model3": {
+                 "name": "google/vit-base-patch16-224",
+                 "processor": None,
+                 "model": None,
+                 "weight": 0.25
+             }
+         }
+         self.loaded_models = set()
+
+     def load_model(self, key):
+         """Lazy load a specific model only when needed"""
+         if key not in self.models:
+             return False
+
+         if key in self.loaded_models:
+             return True
+
+         try:
+             model_info = self.models[key]
+             model_info["processor"] = AutoImageProcessor.from_pretrained(
+                 model_info["name"],
+                 cache_dir="/tmp/transformers_cache"
+             )
+
+             # Load the model
+             model_info["model"] = AutoModelForImageClassification.from_pretrained(
+                 model_info["name"],
+                 cache_dir="/tmp/transformers_cache"
+             )
+
+             # Quantize the model to reduce memory use and speed up inference
+             if torch.cuda.is_available():
+                 model_info["model"].to('cuda')
+             else:
+                 # Try quantization; fall back to the original model on failure
+                 try:
+                     model_info["model"] = torch.quantization.quantize_dynamic(
+                         model_info["model"], {torch.nn.Linear}, dtype=torch.qint8
+                     )
+                 except Exception as e:
+                     print(f"Quantization failed, using the original model: {str(e)}")
+
+             self.loaded_models.add(key)
+             print(f"Successfully loaded model: {model_info['name']}")
+             return True
+         except Exception as e:
+             print(f"Failed to load model {self.models[key]['name']}: {str(e)}")
+             return False
+
+     def get_model_prediction(self, key, image):
+         """Get prediction from a specific model"""
+         if not self.load_model(key):
+             return None
+
+         model_info = self.models[key]

+         try:
+             # Preprocess the image
+             inputs = model_info["processor"](images=image, return_tensors="pt")
+
+             # Use the GPU if available
+             if torch.cuda.is_available():
+                 inputs = {k: v.to('cuda') for k, v in inputs.items()}
+
+             # Inference
+             with torch.no_grad():
+                 outputs = model_info["model"](**inputs)
+
+             # Get the probabilities
+             probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+
+             # Interpret the output
+             ai_probability = self.process_model_output(model_info, outputs, probabilities)
+
+             # Return the result
+             predicted_class_idx = outputs.logits.argmax(-1).item()
+             return {
+                 "model_name": model_info["name"],
+                 "ai_probability": ai_probability,
+                 "predicted_class": model_info["model"].config.id2label[predicted_class_idx]
+             }
+
+         except Exception as e:
+             return {
+                 "model_name": model_info["name"],
+                 "error": str(e)
+             }
+
+     def process_model_output(self, model_info, outputs, probabilities):
+         """Process different model outputs, return AI generation probability"""
+         model_name = model_info["name"].lower()
+
+         # Handle the AI-image-detector model
+         if "ai-image-detector" in model_name:
+             ai_label_idx = None
+             human_label_idx = None
+
+             for idx, label in model_info["model"].config.id2label.items():
+                 label_lower = label.lower()
+                 if "ai" in label_lower or "generated" in label_lower or "fake" in label_lower:
+                     ai_label_idx = idx
+                 if "human" in label_lower or "real" in label_lower:
+                     human_label_idx = idx
+
+             if human_label_idx is not None:
+                 ai_probability = 1 - float(probabilities[0][human_label_idx].item())
+             elif ai_label_idx is not None:
+                 ai_probability = float(probabilities[0][ai_label_idx].item())
+             else:
+                 ai_probability = 0.5
+
+             return ai_probability

+         # Handle generic image-classification models
          predicted_class_idx = outputs.logits.argmax(-1).item()
          predicted_class = model_info["model"].config.id2label[predicted_class_idx].lower()

+         # Check for AI-related keywords
          ai_keywords = ["artificial", "generated", "synthetic", "fake", "computer"]
          for keyword in ai_keywords:
              if keyword in predicted_class:
                  return float(probabilities[0][predicted_class_idx].item())

+         # Default handling
+         if "ai" in predicted_class or "generated" in predicted_class or "fake" in predicted_class:
+             return float(probabilities[0][predicted_class_idx].item())
+         else:
+             return 1 - float(probabilities[0][predicted_class_idx].item())
+ #############################################
+ # Feature extraction (formerly optimized_feature_extraction.py)
+ #############################################
 
 
171
 
172
+ class FeatureExtractor:
173
+ """Optimized image feature extraction"""
174
+
175
+ def __init__(self):
176
+ # 中间结果缓存
177
+ self.cache = {}
178
+
179
+ def clear_cache(self):
180
+ """Clear the cache between images"""
181
+ self.cache = {}
182
+
183
+ @lru_cache(maxsize=8)
184
+ def get_grayscale(self, image_id):
185
+ """Get grayscale version of image with caching"""
186
+ img_cv = self.cache.get('img_cv')
187
+ if img_cv is None:
188
+ return None
189
+
190
+ if len(img_cv.shape) == 3:
191
+ gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
192
+ else:
193
+ gray = img_cv
194
+ return gray
195
+
196
+ def analyze_image_features(self, image, downscale_factor=1.0):
197
+ """Extract image features with optimizations"""
198
+ # 转换为OpenCV格式
199
+ img_array = np.array(image)
200
+ if len(img_array.shape) == 3 and img_array.shape[2] == 3:
201
+ img_cv = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
202
+ else:
203
+ img_cv = img_array
204
+
205
+ # 存入缓存
206
+ self.cache['img_cv'] = img_cv
207
+
208
+ # 如果需要,对大图像进行降采样处理
209
+ if downscale_factor < 1.0:
210
+ h, w = img_cv.shape[:2]
211
+ new_h, new_w = int(h * downscale_factor), int(w * downscale_factor)
212
+ img_cv = cv2.resize(img_cv, (new_w, new_h))
213
+ self.cache['img_cv'] = img_cv
214
+
215
+ # 为图像创建唯一ID (用于lru_cache)
216
+ image_id = id(image)
217
+ self.cache['image_id'] = image_id
218
+
219
+ features = {}
220
+
221
+ # 基本特征
222
+ features["width"] = image.width
223
+ features["height"] = image.height
224
+ features["aspect_ratio"] = image.width / max(1, image.height)
225
+
226
+ # 并行提取不同组的特征
227
+ self._extract_color_features(img_array, features)
228
+ self._extract_edge_features(img_cv, features, image_id)
229
+ self._extract_texture_features(img_cv, features, image_id)
230
+ self._extract_noise_features(img_cv, features)
231
+ self._extract_symmetry_features(img_cv, features)
232
+ self._extract_frequency_features(img_cv, features, image_id)
233
+
234
+ return features
235
+
236
+ def _extract_color_features(self, img_array, features):
237
+ """Extract color-related features"""
238
+ if len(img_array.shape) == 3:
239
+ # 使用向量化操作提高速度
240
+ features["avg_red"] = float(np.mean(img_array[:,:,0]))
241
+ features["avg_green"] = float(np.mean(img_array[:,:,1]))
242
+ features["avg_blue"] = float(np.mean(img_array[:,:,2]))
243
+
244
+ # 颜色标准差
245
+ features["color_std"] = float(np.std([
246
+ features["avg_red"],
247
+ features["avg_green"],
248
+ features["avg_blue"]
249
+ ]))
250
+
251
+ # 局部颜色变化 - 使用步进操作提高速度
252
+ block_size = 10
253
+ stride = 10
254
+ h, w = img_array.shape[:2]
255
+
256
+ # 使用数组切片加速处理
257
+ blocks = []
258
+ for i in range(0, h-block_size, stride):
259
+ for j in range(0, w-block_size, stride):
260
+ blocks.append(img_array[i:i+block_size, j:j+block_size])
261
+
262
+ if blocks:
263
+ # 转换为numpy数组进行向量化操作
264
+ blocks_array = np.array(blocks)
265
+ std_values = np.std(blocks_array.reshape(len(blocks), -1), axis=1)
266
+ features["local_color_variation"] = float(np.mean(std_values))
267
+
268
+ # 颜色直方图和熵
269
+ r_hist, _ = np.histogram(img_array[:,:,0].flatten(), bins=64, range=(0, 256))
270
+ g_hist, _ = np.histogram(img_array[:,:,1].flatten(), bins=64, range=(0, 256))
271
+ b_hist, _ = np.histogram(img_array[:,:,2].flatten(), bins=64, range=(0, 256))
272
+
273
+ # 计算熵
274
+ r_entropy = stats.entropy(r_hist + 1e-10)
275
+ g_entropy = stats.entropy(g_hist + 1e-10)
276
+ b_entropy = stats.entropy(b_hist + 1e-10)
277
+ features["color_entropy"] = float((r_entropy + g_entropy + b_entropy) / 3)
278
+
279
+ def _extract_edge_features(self, img_cv, features, image_id):
280
+ """Extract edge-related features"""
281
+ # 获取灰度图
282
+ gray = self.get_grayscale(image_id)
283
+ if gray is None:
284
+ if len(img_cv.shape) == 3:
285
+ gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
286
+ else:
287
+ gray = img_cv
288
+ self.cache['gray'] = gray
289
+
290
+ # 边缘检测
291
+ edges = cv2.Canny(gray, 100, 200)
292
+ self.cache['edges'] = edges
293
+ features["edge_density"] = float(np.sum(edges > 0) / (img_cv.shape[0] * img_cv.shape[1]))
294
+
295
+ # 边缘方向分析
296
  sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
297
  sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
298
+ self.cache['sobelx'] = sobelx
299
+ self.cache['sobely'] = sobely
300
+
301
  edge_magnitude = np.sqrt(sobelx**2 + sobely**2)
302
+ self.cache['edge_magnitude'] = edge_magnitude
303
  features["edge_variance"] = float(np.var(edge_magnitude))
304
 
305
+ # 边缘方向分布
306
  edge_direction = np.arctan2(sobely, sobelx) * 180 / np.pi
307
+ self.cache['edge_direction'] = edge_direction
308
+
309
+ # 只在显著边缘上计算以提高速度
310
+ mask = edge_magnitude > 30
311
+ if np.sum(mask) > 0:
312
+ edge_dir_hist, _ = np.histogram(edge_direction[mask], bins=18, range=(-180, 180))
313
+ features["edge_direction_entropy"] = float(stats.entropy(edge_dir_hist + 1e-10))
314
+
315
+ def _extract_texture_features(self, img_cv, features, image_id):
316
+ """Extract texture-related features"""
317
+ # 获取灰度图
318
+ gray = self.cache.get('gray')
319
+ if gray is None:
320
+ gray = self.get_grayscale(image_id)
321
+ if gray is None:
322
+ if len(img_cv.shape) == 3:
323
+ gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
324
+ else:
325
+ gray = img_cv
326
+ self.cache['gray'] = gray
327
 
328
+ # 减少GLCM计算量,使用更少的角度和距离
329
  distances = [5]
330
+ angles = [0, np.pi/2] # 4个角度减少到2个
331
+
332
+ # 如果图像较大,对GLCM计算进行降采样
333
+ h, w = gray.shape
334
+ if h > 512 or w > 512:
335
+ gray_small = cv2.resize(gray, (min(w, 512), min(h, 512)))
336
+ else:
337
+ gray_small = gray
338
+
339
+ # 转换为uint8并缩放到0-255用于GLCM
340
+ if gray_small.dtype != np.uint8:
341
+ gray_small = ((gray_small - gray_small.min()) /
342
+ (gray_small.max() - gray_small.min() + 1e-10) * 255).astype(np.uint8)
343
 
344
+ # 使用更少的灰度级以加速计算
345
+ gray_small = (gray_small // 8) * 8 # 减少到32个灰度级
 
 
 
 
 
346
 
347
+ # 计算GLCM
348
  try:
349
+ glcm = graycomatrix(gray_small, distances=distances, angles=angles,
350
+ symmetric=True, normed=True)
351
+
352
+ # 计算GLCM属性
353
+ features["texture_contrast"] = float(np.mean(graycoprops(glcm, 'contrast')[0]))
354
+ features["texture_homogeneity"] = float(np.mean(graycoprops(glcm, 'homogeneity')[0]))
355
+ features["texture_correlation"] = float(np.mean(graycoprops(glcm, 'correlation')[0]))
356
+ features["texture_energy"] = float(np.mean(graycoprops(glcm, 'energy')[0]))
357
+ except Exception as e:
358
+ print(f"GLCM计算错误: {e}")
359
+
360
+ # LBP用于微观纹理分析 - 对AI检测至关重要
361
+ try:
362
+ # 使用更小的半径和更少的点以提高速度
363
+ radius = 2 # 从3减少到2
364
+ n_points = 8 # 从24减少到8
365
+
366
+ # 如果需要,对LBP进行降采样
367
+ if h > 512 or w > 512:
368
+ if 'gray_small' not in locals():
369
+ gray_small = cv2.resize(gray, (min(w, 512), min(h, 512)))
370
+ else:
371
+ gray_small = gray
372
+
373
+ lbp = local_binary_pattern(gray_small, n_points, radius, method='uniform')
374
  lbp_hist, _ = np.histogram(lbp, bins=n_points + 2, range=(0, n_points + 2))
375
+ lbp_hist = lbp_hist.astype(float) / (sum(lbp_hist) + 1e-10)
376
  features["lbp_entropy"] = float(stats.entropy(lbp_hist + 1e-10))
377
+ except Exception as e:
378
+ print(f"LBP计算错误: {e}")
379
+
380
+ def _extract_noise_features(self, img_cv, features):
381
+ """Extract noise-related features"""
382
+ if len(img_cv.shape) == 3:
383
+ # 使用更小的核以加速模糊
384
+ blurred = cv2.GaussianBlur(img_cv, (3, 3), 0)
385
+ noise = cv2.absdiff(img_cv, blurred)
386
+ features["noise_level"] = float(np.mean(noise))
387
+ features["noise_std"] = float(np.std(noise))
388
+
389
+ # 噪声频谱分析 - 只使用一个通道以提高速度
390
+ noise_fft = np.fft.fft2(noise[:,:,0])
391
+ noise_fft_shift = np.fft.fftshift(noise_fft)
392
+ noise_magnitude = np.abs(noise_fft_shift)
393
+ features["noise_spectrum_std"] = float(np.std(noise_magnitude))
394
+
395
+ # 噪声空间一致性 - 使用更大的块以提高速度
396
+ block_size = 64 # 从32增加到64
397
+ h, w = noise.shape[:2]
398
+
399
+ # 使用步进操作
400
+ noise_blocks = []
401
+ for i in range(0, h-block_size, block_size):
402
+ for j in range(0, w-block_size, block_size):
403
+ block = noise[i:i+block_size, j:j+block_size]
404
+ noise_blocks.append(np.mean(block))
405
+
406
+ if noise_blocks:
407
+ features["noise_spatial_std"] = float(np.std(noise_blocks))
+
+    def _extract_symmetry_features(self, img_cv, features):
+        """Extract symmetry-related features"""
+        h, w = img_cv.shape[:2]
+
+        # Horizontal symmetry
+        if w % 2 == 0:
+            left_half = img_cv[:, :w//2]
+            right_half = cv2.flip(img_cv[:, w//2:], 1)
+            if left_half.shape == right_half.shape:
+                # Subsample large images to speed up the comparison
+                if h > 512 or w//2 > 512:
+                    step = max(1, min(h, w//2) // 512)
+                    diff = cv2.absdiff(left_half[::step, ::step], right_half[::step, ::step])
+                else:
+                    diff = cv2.absdiff(left_half, right_half)
+                h_symmetry = 1 - float(np.mean(diff) / 255)
+                features["horizontal_symmetry"] = h_symmetry
+
+        # Vertical symmetry
+        if h % 2 == 0:
+            top_half = img_cv[:h//2, :]
+            bottom_half = cv2.flip(img_cv[h//2:, :], 0)
+            if top_half.shape == bottom_half.shape:
+                # Subsample large images to speed up the comparison
+                if h//2 > 512 or w > 512:
+                    step = max(1, min(h//2, w) // 512)
+                    diff = cv2.absdiff(top_half[::step, ::step], bottom_half[::step, ::step])
+                else:
+                    diff = cv2.absdiff(top_half, bottom_half)
+                v_symmetry = 1 - float(np.mean(diff) / 255)
+                features["vertical_symmetry"] = v_symmetry
+
+    def _extract_frequency_features(self, img_cv, features, image_id):
+        """Extract frequency domain features"""
+        # Fetch the grayscale image (cached per image)
+        gray = self.cache.get('gray')
+        if gray is None:
+            gray = self.get_grayscale(image_id)
+            if gray is None:
+                if len(img_cv.shape) == 3:
+                    gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
+                else:
+                    gray = img_cv
+            self.cache['gray'] = gray

+        # Downsample large images before the FFT
+        h, w = gray.shape
+        if h > 512 or w > 512:
+            gray_small = cv2.resize(gray, (512, 512))
+        else:
+            gray_small = gray
+
+        # FFT analysis
+        f_transform = np.fft.fft2(gray_small)
         f_shift = np.fft.fftshift(f_transform)
         magnitude = np.log(np.abs(f_shift) + 1)

+        # Compute the high/low frequency energy ratio
         h, w = magnitude.shape
         center_h, center_w = h // 2, w // 2

+        # Low-frequency region (center of the shifted spectrum)
         low_freq_region = magnitude[center_h-h//8:center_h+h//8, center_w-w//8:center_w+w//8]
         low_freq_mean = np.mean(low_freq_region)

+        # High-frequency estimate: overall mean minus the low-frequency mean
         high_freq_mean = np.mean(magnitude) - low_freq_mean
         features["freq_ratio"] = float(high_freq_mean / max(low_freq_mean, 0.001))
+        features["freq_std"] = float(np.std(magnitude))

+        # Frequency anisotropy - important for AI detection; fewer angles for speed
         freq_blocks = []
+        for angle in range(0, 180, 45):  # step widened from 20 to 45 degrees
             mask = np.zeros_like(magnitude)
             cv2.ellipse(mask, (center_w, center_h), (w//2, h//2), angle, -10, 10, 1, -1)
             freq_blocks.append(np.mean(magnitude * mask))
         features["freq_anisotropy"] = float(np.std(freq_blocks))
+
+#############################################
+# Analysis logic (formerly optimized_app.py)
+#############################################
+
+# Initialize the managers
+model_manager = ModelManager()
+feature_extractor = FeatureExtractor()
+
+# The features that matter most for AI detection: per-feature threshold and score weight
+CRITICAL_FEATURES = {
+    "lbp_entropy": {"threshold": 2.5, "weight": 0.4},
+    "freq_anisotropy": {"threshold": 0.1, "weight": 0.4},
+    "detail_spatial_std": {"threshold": 5, "weight": 0.3},
+    "texture_correlation": {"threshold": 0.9, "weight": 0.15},
+    "horizontal_symmetry": {"threshold": 0.7, "weight": 0.1},
+    "vertical_symmetry": {"threshold": 0.7, "weight": 0.1},
+    "noise_spatial_std": {"threshold": 0.5, "weight": 0.15},
+    "freq_ratio": {"threshold": 0.1, "weight": 0.1},
+    "noise_spectrum_std": {"threshold": 1000, "weight": 0.15},
+    "color_entropy": {"threshold": 5, "weight": 0.15}
+}
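+
+# Scoring sketch: each triggered feature adds its weight to the AI score. For
+# example, lbp_entropy = 2.1 (< 2.5) plus freq_anisotropy = 0.05 (< 0.1) already
+# contribute 0.4 + 0.4 = 0.8 before the critical-count floors below apply.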

 def check_ai_specific_features(image_features):
+    """Optimized check for AI-generated image features"""
     ai_score = 0
     ai_signs = []

+    # Evaluate only the most critical features
+    for feature_name, config in CRITICAL_FEATURES.items():
+        if feature_name not in image_features:
+            continue
+
+        value = image_features[feature_name]
+        threshold = config["threshold"]
+        weight = config["weight"]
+
+        # The comparison direction depends on the feature
+        if feature_name in ["lbp_entropy", "freq_anisotropy", "detail_spatial_std",
+                            "noise_spatial_std", "freq_ratio", "noise_spectrum_std",
+                            "color_entropy"]:
+            if value < threshold:
+                ai_score += weight
+                ai_signs.append(f"{feature_name} 异常低 ({value:.2f})")
+        elif feature_name in ["texture_correlation", "horizontal_symmetry", "vertical_symmetry"]:
+            if value > threshold:
+                ai_score += weight
+                ai_signs.append(f"{feature_name} 异常高 ({value:.2f})")
+
+    # Floor the score by how many critical features fired
+    critical_count = len(ai_signs)
+    if critical_count >= 5:
+        ai_score = max(ai_score, 0.9)
+    elif critical_count >= 3:
         ai_score = max(ai_score, 0.7)

     return min(ai_score, 1.0), ai_signs
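+
+# Usage sketch (hypothetical values): check_ai_specific_features({"lbp_entropy": 2.1,
+# "horizontal_symmetry": 0.85}) would return (0.4 + 0.1 = 0.5, [two sign strings]);
+# only two features fire, so neither critical-count floor applies.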

 def detect_beauty_filter_signs(image_features):
+    """Detect beauty-filter traces"""
     beauty_score = 0
     beauty_signs = []

+    # Check only the most important beauty-filter indicators
     if "face_skin_std" in image_features:
         if image_features["face_skin_std"] < 15:
             beauty_score += 0.3
+            beauty_signs.append("皮肤质感过于均匀")

     if "edge_density" in image_features:
         if image_features["edge_density"] < 0.03:
             beauty_score += 0.2
+            beauty_signs.append("边缘过于平滑")

     if "noise_level" in image_features:
         if image_features["noise_level"] < 1.0:
             beauty_score += 0.2
+            beauty_signs.append("噪点异常少")

     return min(beauty_score, 1.0), beauty_signs

 def detect_photoshop_signs(image_features):
+    """Detect Photoshop retouching traces"""
     ps_score = 0
     ps_signs = []

+    # Check only the strongest retouching indicators
     if "texture_homogeneity" in image_features:
         if image_features["texture_homogeneity"] > 0.4:
             ps_score += 0.2
             ps_signs.append("皮肤质感过于均匀")

     if "edge_density" in image_features:
         if image_features["edge_density"] < 0.01:
             ps_score += 0.2
             ps_signs.append("边缘过于平滑")

     if "color_std" in image_features:
         if image_features["color_std"] > 50:
             ps_score += 0.2
             ps_signs.append("颜色分布极不自然")

     return min(ps_score, 1.0), ps_signs
+
+def get_detailed_analysis(ai_probability, ps_score, beauty_score, ps_signs, ai_signs, beauty_signs, valid_models_count, ai_feature_score):
+    """Provide detailed analysis with two-level classification"""

+    # Scale the stated confidence by how many models contributed
     confidence_prefix = ""
     if valid_models_count >= 3:
         confidence_prefix = "极高置信度:"
     elif valid_models_count == 1:
         confidence_prefix = "中等置信度:"

+    # Reconcile feature analysis with the model vote: strong AI features raise the
+    # probability floor here; the category itself is assigned once, further below
+    # (the original category assignments in this branch were dead code, since the
+    # classification block always reassigned them)
     if ai_feature_score > 0.8 and ai_probability < 0.6:
+        ai_probability = max(0.8, ai_probability)
     elif ai_feature_score > 0.6 and ai_probability < 0.5:
+        ai_probability = max(0.7, ai_probability)
+
+    # First-level classification: AI vs real
+    if ai_probability > 0.6:
         category = confidence_prefix + "AI生成图像"
         description = "图像很可能是由AI完全生成,几乎没有真人照片的特征。"
         main_category = "AI生成"
     else:
+        # Second-level classification: unedited vs retouched
+        combined_edit_score = max(ps_score, beauty_score)

         if combined_edit_score > 0.5:
             category = confidence_prefix + "真人照片,修图痕迹明显"
             description = "图像很可能是真人照片,但经过了明显的后期修饰处理。"
             main_category = "真人照片-修图明显"
         else:
             category = confidence_prefix + "真人照片,素人"
             description = "图像很可能是未经大量处理的真人照片,保留了自然的细节和特征。"
             main_category = "真人照片-素人"

+        # Handle the boundary case where both signals are strong
         if ai_probability > 0.45 and combined_edit_score > 0.7:
             category = confidence_prefix + "真人照片,修图痕迹明显(也可能是AI生成)"
             description = "图像可能是真人照片经过大量后期处理,也可能是AI生成图像。由于现代AI技术与高度修图效果相似,难以完全区分。"
             main_category = "真人照片-修图明显"

+    # Format the detail strings
+    ps_details = ("检测到的修图痕迹:" + "、".join(ps_signs)) if ps_signs else "未检测到明显的修图痕迹。"
+    ai_details = ("检测到的AI特征:" + "、".join(ai_signs)) if ai_signs else "未检测到明显的AI生成特征。"
+    beauty_details = ("检测到的美颜特征:" + "、".join(beauty_signs)) if beauty_signs else "未检测到明显的美颜特征。"

     return category, description, ps_details, ai_details, beauty_details, main_category

 def detect_ai_image(image):
+    """Main detection function with optimizations"""
     if image is None:
         return {"error": "未提供图像"}

+    start_time = time.time()
+
+    # Step 1: get model predictions (models are loaded lazily, only when needed)
     results = {}
     valid_models = 0
     weighted_ai_probability = 0
+    total_weight = 0

+    # Run every configured model
+    for key, model_info in model_manager.models.items():
+        model_result = model_manager.get_model_prediction(key, image)
+
+        if model_result and "error" not in model_result:
+            results[key] = model_result
+            weighted_ai_probability += model_result["ai_probability"] * model_info["weight"]
+            total_weight += model_info["weight"]
+            valid_models += 1
+        else:
+            results[key] = model_result or {"model_name": model_info["name"], "error": "处理失败"}

     # Compute the final weighted probability
     if valid_models > 0:
+        # Normalize by the weights of the models that actually produced a result;
+        # dividing by all loaded models would skew the score whenever one fails
+        final_ai_probability = weighted_ai_probability / total_weight
     else:
         return {"error": "所有模型加载失败"}

+    # Step 2: extract image features (downsample large inputs first)
+    downscale_factor = 1.0
+    if image.width * image.height > 1024 * 1024:  # larger than ~1 megapixel
+        downscale_factor = min(1.0, 1024 * 1024 / (image.width * image.height))

+    # Extract features
+    feature_extractor.clear_cache()  # drop caches from the previous run
+    image_features = feature_extractor.analyze_image_features(image, downscale_factor)

+    # Step 3: analyze the features
+    ai_feature_score, ai_signs = check_ai_specific_features(image_features)
     ps_score, ps_signs = detect_photoshop_signs(image_features)
     beauty_score, beauty_signs = detect_beauty_filter_signs(image_features)
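+
+    # Note: downscale_factor is an area ratio, assuming analyze_image_features
+    # interprets it that way - e.g. a 2048x2048 input gives 1048576 / 4194304 = 0.25.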

+    # Step 4: adjust the probability using the feature analysis
     adjusted_probability = final_ai_probability

+    # Give the feature analysis more weight via probability floors
+    if ai_feature_score > 0.8:
+        adjusted_probability = max(adjusted_probability, 0.8)
     elif ai_feature_score > 0.6:
         adjusted_probability = max(adjusted_probability, 0.7)
     elif ai_feature_score > 0.4:
         adjusted_probability = max(adjusted_probability, 0.6)

+    # Check the strongest individual indicators
     key_ai_features_count = 0

+    # LBP entropy (micro-texture analysis)
     if "lbp_entropy" in image_features and image_features["lbp_entropy"] < 2.5:
         key_ai_features_count += 1
         adjusted_probability += 0.1

+    # Frequency anisotropy
     if "freq_anisotropy" in image_features and image_features["freq_anisotropy"] < 0.1:
         key_ai_features_count += 1
         adjusted_probability += 0.1

+    # Spatial distribution of fine detail
     if "detail_spatial_std" in image_features and image_features["detail_spatial_std"] < 5:
         key_ai_features_count += 1
         adjusted_probability += 0.1

+    # Multiple key features together strongly suggest AI generation
     if key_ai_features_count >= 2:
         adjusted_probability = max(adjusted_probability, 0.7)

+    # Clamp the probability to the valid range
     adjusted_probability = min(1.0, max(0.0, adjusted_probability))

+    # Step 5: get the detailed analysis
     category, description, ps_details, ai_details, beauty_details, main_category = get_detailed_analysis(
+        adjusted_probability, ps_score, beauty_score, ps_signs, ai_signs, beauty_signs,
+        valid_models, ai_feature_score
     )

     # Assemble the final result
+    processing_time = time.time() - start_time
+
     final_result = {
         "ai_probability": adjusted_probability,
         "original_ai_probability": final_ai_probability,
         "ps_score": ps_score,
         "beauty_score": beauty_score,
         "ai_feature_score": ai_feature_score,
         "category": category,
         "description": description,
         "ps_details": ps_details,
         "ai_details": ai_details,
         "beauty_details": beauty_details,
+        "processing_time": f"{processing_time:.2f} seconds",
         "individual_model_results": results,
+        # Include only the most important features to keep the response small
+        "key_features": {k: image_features[k] for k in CRITICAL_FEATURES if k in image_features}
     }

+    # Return two values: the JSON result and the label data
     label_data = {main_category: 1.0}
     return final_result, label_data
 
 
761
  gr.JSON(label="详细分析结果"),
762
  gr.Label(label="主要分类", num_top_classes=1)
763
  ],
764
+ title="优化版AI图像检测API",
765
  description="多模型集成检测图像是否由AI生成或真人照片(素人/修图)",
766
  examples=None,
767
  allow_flagging="never"
768
  )

+# Launch the app
+if __name__ == "__main__":
+    iface.launch(share=True)
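+
+# Client-side usage sketch (an assumption, not part of this commit): with the
+# gradio_client package the app can be called roughly like this; newer
+# gradio_client versions may require handle_file("photo.jpg") for the image:
+#
+#   from gradio_client import Client
+#   client = Client("http://127.0.0.1:7860/")
+#   result, label = client.predict("photo.jpg", api_name="/predict")
+#   print(label)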