import gradio as gr
import torch
from PIL import Image
import numpy as np
import cv2
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Load multiple detection models
models = {
    "model1": {
        "name": "umm-maybe/AI-image-detector",
        "processor": None,
        "model": None,
        "weight": 0.5
    },
    "model2": {
        "name": "microsoft/resnet-50",  # general-purpose image classifier
        "processor": None,
        "model": None,
        "weight": 0.25
    },
    "model3": {
        "name": "google/vit-base-patch16-224",  # Vision Transformer model
        "processor": None,
        "model": None,
        "weight": 0.25
    }
}

# Initialize the models
for key in models:
    try:
        models[key]["processor"] = AutoImageProcessor.from_pretrained(models[key]["name"])
        models[key]["model"] = AutoModelForImageClassification.from_pretrained(models[key]["name"])
        print(f"Loaded model: {models[key]['name']}")
    except Exception as e:
        print(f"Failed to load model {models[key]['name']}: {str(e)}")
        models[key]["processor"] = None
        models[key]["model"] = None


def process_model_output(model_info, outputs, probabilities):
    """Normalize the output of different models into a single AI-generated probability."""
    model_name = model_info["name"].lower()

    # Model-specific handling
    if "ai-image-detector" in model_name:
        # Special handling for umm-maybe/AI-image-detector: inspect its labels
        ai_label_idx = None
        human_label_idx = None
        for idx, label in model_info["model"].config.id2label.items():
            label_lower = label.lower()
            if "ai" in label_lower or "generated" in label_lower or "fake" in label_lower:
                ai_label_idx = idx
            if "human" in label_lower or "real" in label_lower:
                human_label_idx = idx

        # Corrected label-interpretation logic
        if human_label_idx is not None:
            # A high "human" probability means a low AI probability
            ai_probability = 1 - float(probabilities[0][human_label_idx].item())
        elif ai_label_idx is not None:
            # Use the "AI" label probability directly
            ai_probability = float(probabilities[0][ai_label_idx].item())
        else:
            # Fallback
            ai_probability = 0.5
        return ai_probability

    elif "resnet" in model_name:
        # General-purpose classifier: use a simple keyword heuristic
        predicted_class_idx = outputs.logits.argmax(-1).item()
        predicted_class = model_info["model"].config.id2label[predicted_class_idx].lower()

        # Check whether the predicted class name contains AI-related keywords
        ai_keywords = ["artificial", "generated", "synthetic", "fake", "computer"]
        for keyword in ai_keywords:
            if keyword in predicted_class:
                return float(probabilities[0][predicted_class_idx].item())

        # No explicit AI-related class: return a neutral probability
        return 0.5

    elif "vit" in model_name:
        # Vision Transformer: same keyword heuristic
        predicted_class_idx = outputs.logits.argmax(-1).item()
        predicted_class = model_info["model"].config.id2label[predicted_class_idx].lower()

        ai_keywords = ["artificial", "generated", "synthetic", "fake", "computer"]
        for keyword in ai_keywords:
            if keyword in predicted_class:
                return float(probabilities[0][predicted_class_idx].item())

        # No explicit AI-related class: return a neutral probability
        return 0.5

    # Default handling
    predicted_class_idx = outputs.logits.argmax(-1).item()
    predicted_class = model_info["model"].config.id2label[predicted_class_idx].lower()
    if "ai" in predicted_class or "generated" in predicted_class or "fake" in predicted_class:
        return float(probabilities[0][predicted_class_idx].item())
    else:
        return 1 - float(probabilities[0][predicted_class_idx].item())
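
# --- Optional debugging helper (our addition, not part of the original flow). ---
# A minimal sketch showing how a single backbone's raw output is mapped to an
# AI probability via process_model_output(); the helper name is hypothetical
# and it assumes the requested model loaded successfully.
def _debug_single_model(image, key="model1"):
    info = models[key]
    if info["processor"] is None or info["model"] is None:
        print(f"{info['name']} is not loaded")
        return None
    inputs = info["processor"](images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = info["model"](**inputs)
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
    prob = process_model_output(info, outputs, probabilities)
    print(f"{info['name']}: AI probability = {prob:.3f}")
    return prob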

def analyze_image_features(image):
    """Extract low-level image features used by the heuristic analyses."""
    # Convert to OpenCV format
    img_array = np.array(image)
    if len(img_array.shape) == 3 and img_array.shape[2] == 3:
        img_cv = cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)
    else:
        img_cv = img_array

    features = {}

    # Basic features
    features["width"] = image.width
    features["height"] = image.height
    features["aspect_ratio"] = image.width / max(1, image.height)

    # Color analysis
    if len(img_array.shape) == 3:
        features["avg_red"] = float(np.mean(img_array[:, :, 0]))
        features["avg_green"] = float(np.mean(img_array[:, :, 1]))
        features["avg_blue"] = float(np.mean(img_array[:, :, 2]))

        # Spread of the channel means - checks whether the color balance looks natural
        features["color_std"] = float(np.std([
            features["avg_red"],
            features["avg_green"],
            features["avg_blue"]
        ]))

        # Local color variation - real photos usually show more local variation
        local_color_variations = []
        for i in range(0, img_array.shape[0] - 10, 10):
            for j in range(0, img_array.shape[1] - 10, 10):
                patch = img_array[i:i + 10, j:j + 10]
                local_color_variations.append(np.std(patch))
        features["local_color_variation"] = float(np.mean(local_color_variations))

    # Edge consistency analysis
    edges = cv2.Canny(img_cv, 100, 200)
    features["edge_density"] = float(np.sum(edges > 0) / (image.width * image.height))

    # Edge naturalness - edges in real photos tend to vary more
    if len(img_array.shape) == 3:
        gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
        edge_magnitude = np.sqrt(sobelx**2 + sobely**2)
        features["edge_variance"] = float(np.var(edge_magnitude))

    # Texture analysis using the gray-level co-occurrence matrix (GLCM)
    if len(img_array.shape) == 3:
        gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
        from skimage.feature import graycomatrix, graycoprops

        # Compute the GLCM
        distances = [5]
        angles = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]
        glcm = graycomatrix(gray, distances=distances, angles=angles, symmetric=True, normed=True)

        # GLCM properties
        features["texture_contrast"] = float(np.mean(graycoprops(glcm, 'contrast')[0]))
        features["texture_homogeneity"] = float(np.mean(graycoprops(glcm, 'homogeneity')[0]))
        features["texture_correlation"] = float(np.mean(graycoprops(glcm, 'correlation')[0]))
        features["texture_energy"] = float(np.mean(graycoprops(glcm, 'energy')[0]))
        features["texture_dissimilarity"] = float(np.mean(graycoprops(glcm, 'dissimilarity')[0]))
        features["texture_ASM"] = float(np.mean(graycoprops(glcm, 'ASM')[0]))

    # Noise analysis
    if len(img_array.shape) == 3:
        blurred = cv2.GaussianBlur(img_cv, (5, 5), 0)
        noise = cv2.absdiff(img_cv, blurred)
        features["noise_level"] = float(np.mean(noise))

        # Noise spread - checks whether the noise distribution looks natural
        features["noise_std"] = float(np.std(noise))

        # Noise spectrum - real photos tend to have a more natural noise spectrum
        noise_fft = np.fft.fft2(noise[:, :, 0])
        noise_fft_shift = np.fft.fftshift(noise_fft)
        noise_magnitude = np.abs(noise_fft_shift)
        features["noise_spectrum_std"] = float(np.std(noise_magnitude))

    # Symmetry analysis - AI-generated images often show higher symmetry
    if img_cv.shape[1] % 2 == 0:  # require an even width
        left_half = img_cv[:, :img_cv.shape[1] // 2]
        right_half = cv2.flip(img_cv[:, img_cv.shape[1] // 2:], 1)
        if left_half.shape == right_half.shape:
            h_symmetry = 1 - float(np.mean(cv2.absdiff(left_half, right_half)) / 255)
            features["horizontal_symmetry"] = h_symmetry

    if img_cv.shape[0] % 2 == 0:  # require an even height
        top_half = img_cv[:img_cv.shape[0] // 2, :]
        bottom_half = cv2.flip(img_cv[img_cv.shape[0] // 2:, :], 0)
        if top_half.shape == bottom_half.shape:
            v_symmetry = 1 - float(np.mean(cv2.absdiff(top_half, bottom_half)) / 255)
            features["vertical_symmetry"] = v_symmetry

    # Frequency-domain analysis - looks for unnatural frequency distributions
    if len(img_array.shape) == 3:
        gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
        f_transform = np.fft.fft2(gray)
        f_shift = np.fft.fftshift(f_transform)
        magnitude = np.log(np.abs(f_shift) + 1)

        # Ratio of high-frequency to low-frequency content
        h, w = magnitude.shape
        center_h, center_w = h // 2, w // 2

        # Low-frequency region (around the center)
        low_freq_region = magnitude[center_h - h // 8:center_h + h // 8,
                                    center_w - w // 8:center_w + w // 8]
        low_freq_mean = np.mean(low_freq_region)

        # High-frequency contribution (everything outside the center, approximated)
        high_freq_mean = np.mean(magnitude) - low_freq_mean

        features["freq_ratio"] = float(high_freq_mean / max(low_freq_mean, 0.001))

        # Spread of the frequency distribution - real photos tend to look more natural here
        freq_std = np.std(magnitude)
        features["freq_std"] = float(freq_std)

    # Try to detect faces
    try:
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        gray = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 4)
        features["face_count"] = len(faces)

        if len(faces) > 0:
            # Analyze per-face features
            face_features = []
            for (x, y, w, h) in faces:
                face = img_cv[y:y + h, x:x + w]

                # Skin texture analysis
                face_hsv = cv2.cvtColor(face, cv2.COLOR_BGR2HSV)
                skin_mask = cv2.inRange(face_hsv, (0, 20, 70), (20, 150, 255))
                skin_pixels = face[skin_mask > 0]

                if len(skin_pixels) > 0:
                    face_features.append({
                        "skin_std": float(np.std(skin_pixels)),
                        "skin_local_contrast": float(np.mean(cv2.Laplacian(face, cv2.CV_64F))),
                        "face_symmetry": analyze_face_symmetry(face)
                    })

            if face_features:
                features["face_skin_std"] = np.mean([f["skin_std"] for f in face_features])
                features["face_local_contrast"] = np.mean([f["skin_local_contrast"] for f in face_features])
                features["face_symmetry"] = np.mean([f["face_symmetry"] for f in face_features])
    except Exception:
        # If face detection fails, simply skip the face features
        pass

    # Analyze clothing details
    clothing_features = analyze_clothing_details(img_cv)
    features.update(clothing_features)

    # Analyze hands and other extremities
    extremity_features = analyze_extremities(img_cv)
    features.update(extremity_features)

    return features
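
# --- Optional self-test sketch (our addition, never called automatically). ---
# Runs the feature extractor on a random RGB image and prints the resulting
# feature names and values, which is handy for checking that OpenCV and
# scikit-image are installed correctly.
def _demo_image_features():
    noise_img = Image.fromarray(np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8))
    feats = analyze_image_features(noise_img)
    for name, value in sorted(feats.items()):
        print(f"{name}: {value}")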

def analyze_face_symmetry(face):
    """Measure the left-right symmetry of a detected face."""
    if face.shape[1] % 2 == 0:  # require an even width
        left_half = face[:, :face.shape[1] // 2]
        right_half = cv2.flip(face[:, face.shape[1] // 2:], 1)
        if left_half.shape == right_half.shape:
            return 1 - float(np.mean(cv2.absdiff(left_half, right_half)) / 255)
    return 0.5  # default value


def analyze_clothing_details(image):
    """Estimate how natural the clothing details (folds, texture) look."""
    features = {}
    try:
        # Convert to grayscale
        if len(image.shape) == 3:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray = image

        # Canny edge detection
        edges = cv2.Canny(gray, 50, 150)

        # Detect straight lines with the Hough transform
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50, minLineLength=50, maxLineGap=10)

        if lines is not None:
            # Angle distribution of the detected lines
            angles = []
            for line in lines:
                x1, y1, x2, y2 = line[0]
                if x2 - x1 != 0:  # avoid division by zero
                    angle = np.arctan((y2 - y1) / (x2 - x1)) * 180 / np.pi
                    angles.append(angle)

            if angles:
                # Angle spread - AI-generated clothing folds often have an unnatural angle distribution
                features["clothing_angle_std"] = float(np.std(angles))

                # Angle histogram - too many similar angles is a sign of AI generation
                hist, _ = np.histogram(angles, bins=18, range=(-90, 90))
                max_count = np.max(hist)
                total_count = np.sum(hist)
                features["clothing_angle_uniformity"] = float(max_count / max(total_count, 1))

        # Texture consistency: split the image into blocks and measure per-block texture
        block_size = 32
        h, w = gray.shape
        texture_variations = []

        for i in range(0, h - block_size, block_size):
            for j in range(0, w - block_size, block_size):
                block = gray[i:i + block_size, j:j + block_size]
                # A simple variance stands in for a local texture descriptor (e.g. LBP)
                texture_variations.append(np.var(block))

        if texture_variations:
            # Spread of the texture variation - real fabric varies more naturally
            features["clothing_texture_std"] = float(np.std(texture_variations))
            # Mean texture variation - AI-generated clothing texture usually varies less
            features["clothing_texture_mean"] = float(np.mean(texture_variations))
    except Exception:
        # If the analysis fails, simply skip the clothing features
        pass

    return features


def analyze_extremities(image):
    """Analyze fine details of extremities such as fingers and toes."""
    features = {}
    try:
        # Convert to grayscale
        if len(image.shape) == 3:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray = image

        # Use morphological operations to extract candidate hand regions
        _, thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)
        kernel = np.ones((5, 5), np.uint8)
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

        # Find contours
        contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Analyze contour complexity
        if contours:
            # Perimeter-to-area ratio - fingers and similar details increase this ratio
            perimeter_area_ratios = []
            for contour in contours:
                area = cv2.contourArea(contour)
                if area > 100:  # ignore very small contours
                    perimeter = cv2.arcLength(contour, True)
                    ratio = perimeter / max(area, 1)
                    perimeter_area_ratios.append(ratio)

            if perimeter_area_ratios:
                features["extremity_perimeter_area_ratio"] = float(np.mean(perimeter_area_ratios))

            # Convexity defects - the gaps between fingers produce convexity defects
            defect_depths = []
            for contour in contours:
                if len(contour) > 5:  # need enough points to build a hull
                    hull = cv2.convexHull(contour, returnPoints=False)
                    if len(hull) > 3:  # need at least 4 hull points for convexity defects
                        try:
                            defects = cv2.convexityDefects(contour, hull)
                            if defects is not None:
                                for i in range(defects.shape[0]):
                                    _, _, _, depth = defects[i, 0]
                                    defect_depths.append(depth)
                        except Exception:
                            pass

            if defect_depths:
                features["extremity_defect_depth_mean"] = float(np.mean(defect_depths))
                features["extremity_defect_depth_std"] = float(np.std(defect_depths))
    except Exception:
        # If the analysis fails, simply skip the extremity features
        pass

    return features

def check_ai_specific_features(image_features):
    """Score typical tell-tale traits of AI-generated images."""
    ai_score = 0
    ai_signs = []

    # Symmetry - AI-generated images are often highly symmetric (weight reduced)
    if "horizontal_symmetry" in image_features and "vertical_symmetry" in image_features:
        avg_symmetry = (image_features["horizontal_symmetry"] + image_features["vertical_symmetry"]) / 2
        if avg_symmetry > 0.8:  # raised threshold
            ai_score += 0.2  # reduced weight
            ai_signs.append("Unusually high image symmetry")
        elif avg_symmetry > 0.7:
            ai_score += 0.1
            ai_signs.append("Relatively high image symmetry")

    # Texture correlation - AI-generated images often have high texture correlation
    if "texture_correlation" in image_features:
        if image_features["texture_correlation"] > 0.95:  # raised threshold
            ai_score += 0.2
            ai_signs.append("Unusually high texture correlation")
        elif image_features["texture_correlation"] > 0.9:
            ai_score += 0.1
            ai_signs.append("Relatively high texture correlation")

    # Edge/noise relationship - AI images often have clean edges but unnatural noise
    if "edge_density" in image_features and "noise_level" in image_features:
        edge_noise_ratio = image_features["edge_density"] / max(image_features["noise_level"], 0.001)
        if edge_noise_ratio < 0.01:
            ai_score += 0.2
            ai_signs.append("Unnatural relationship between edges and noise")

    # Color smoothness - AI-generated images often have smoother color transitions
    if "color_std" in image_features and image_features["color_std"] < 10:
        ai_score += 0.1  # reduced weight
        ai_signs.append("Unusually smooth color transitions")

    # Texture energy - AI-generated images often have an unnatural energy distribution
    if "texture_energy" in image_features and image_features["texture_energy"] < 0.01:
        ai_score += 0.2
        ai_signs.append("Unnatural texture energy distribution")

    # Frequency ratio - AI-generated images often have an unnatural frequency distribution
    if "freq_ratio" in image_features:
        if image_features["freq_ratio"] < 0.1 or image_features["freq_ratio"] > 2.0:
            ai_score += 0.1  # reduced weight
            ai_signs.append("Unnatural frequency distribution")

    # Local color variation - real photos usually show more of it
    if "local_color_variation" in image_features and image_features["local_color_variation"] < 5:
        ai_score += 0.2
        ai_signs.append("Unusually little local color variation")

    # Edge variance - edges in real photos vary more naturally
    if "edge_variance" in image_features and image_features["edge_variance"] < 100:
        ai_score += 0.2
        ai_signs.append("Unusually uniform edge variation")

    # Noise spectrum - real photos have a more natural noise spectrum
    if "noise_spectrum_std" in image_features and image_features["noise_spectrum_std"] < 1000:
        ai_score += 0.2
        ai_signs.append("Unusually regular noise spectrum")

    # Face features - AI-generated faces show characteristic traits
    if "face_symmetry" in image_features and image_features["face_symmetry"] > 0.8:
        ai_score += 0.2
        ai_signs.append("Unusually high facial symmetry")

    if "face_skin_std" in image_features and image_features["face_skin_std"] < 10:
        ai_score += 0.3
        ai_signs.append("Unusually uniform skin texture")

    # Clothing features - AI-generated clothing shows characteristic problems
    if "clothing_angle_uniformity" in image_features and image_features["clothing_angle_uniformity"] > 0.3:
        ai_score += 0.3
        ai_signs.append("Unnatural angle distribution of clothing folds")

    if "clothing_texture_std" in image_features and image_features["clothing_texture_std"] < 100:
        ai_score += 0.2
        ai_signs.append("Unusually uniform clothing texture variation")

    # Hand/extremity features - AI-generated hands show characteristic problems
    if "extremity_perimeter_area_ratio" in image_features:
        if image_features["extremity_perimeter_area_ratio"] < 0.05:
            ai_score += 0.3
            ai_signs.append("Unusually smooth hand/extremity contours")

    if "extremity_defect_depth_std" in image_features and image_features["extremity_defect_depth_std"] < 10:
        ai_score += 0.2
        ai_signs.append("Unusually uniform gaps between fingers")

    return min(ai_score, 1.0), ai_signs
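
# --- Illustration only (our addition): how the heuristic scorer consumes the
# feature dict produced by analyze_image_features(). The feature values below
# are made up; keys that are missing are simply skipped by the scorer.
def _demo_ai_feature_check():
    fake_features = {
        "horizontal_symmetry": 0.85,
        "vertical_symmetry": 0.82,
        "texture_correlation": 0.96,
        "color_std": 8.0,
    }
    score, signs = check_ai_specific_features(fake_features)
    print(f"heuristic AI score: {score:.2f}")
    for sign in signs:
        print(" -", sign)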

def detect_beauty_filter_signs(image_features):
    """Detect traces of beauty-filter processing."""
    beauty_score = 0
    beauty_signs = []

    # Skin texture
    if "face_skin_std" in image_features:
        if image_features["face_skin_std"] < 15:
            beauty_score += 0.3
            beauty_signs.append("Skin texture too uniform, a typical beauty-filter trait")
        elif image_features["face_skin_std"] < 25:
            beauty_score += 0.2
            beauty_signs.append("Fairly uniform skin texture, a beauty filter may have been used")

    # Local contrast - beauty filters usually lower local contrast
    if "face_local_contrast" in image_features:
        if image_features["face_local_contrast"] < 5:
            beauty_score += 0.2
            beauty_signs.append("Low local facial contrast, a typical beauty-filter trait")

    # Edge smoothness - beauty filters usually smooth edges
    if "edge_density" in image_features:
        if image_features["edge_density"] < 0.03:
            beauty_score += 0.2
            beauty_signs.append("Edges too smooth, a typical beauty-filter trait")
        elif image_features["edge_density"] < 0.05:
            beauty_score += 0.1
            beauty_signs.append("Fairly smooth edges, a beauty filter may have been used")

    # Noise - beauty filters usually reduce noise
    if "noise_level" in image_features:
        if image_features["noise_level"] < 1.0:
            beauty_score += 0.2
            beauty_signs.append("Unusually little noise, a typical beauty-filter trait")
        elif image_features["noise_level"] < 2.0:
            beauty_score += 0.1
            beauty_signs.append("Little noise, a beauty filter may have been used")

    # Facial symmetry - beauty filters usually increase symmetry
    if "face_symmetry" in image_features:
        if image_features["face_symmetry"] > 0.8:
            beauty_score += 0.2
            beauty_signs.append("Unusually high facial symmetry, a typical beauty-filter trait")
        elif image_features["face_symmetry"] > 0.7:
            beauty_score += 0.1
            beauty_signs.append("Fairly high facial symmetry, a beauty filter may have been used")

    return min(beauty_score, 1.0), beauty_signs


def detect_photoshop_signs(image_features):
    """Detect traces of Photoshop-style retouching."""
    ps_score = 0
    ps_signs = []

    # Skin texture
    if "texture_homogeneity" in image_features:
        if image_features["texture_homogeneity"] > 0.4:
            ps_score += 0.2
            ps_signs.append("Skin texture too uniform")
        elif image_features["texture_homogeneity"] > 0.3:
            ps_score += 0.1
            ps_signs.append("Fairly uniform skin texture")

    # Unnatural edges
    if "edge_density" in image_features:
        if image_features["edge_density"] < 0.01:
            ps_score += 0.2
            ps_signs.append("Edges too smooth")
        elif image_features["edge_density"] < 0.03:
            ps_score += 0.1
            ps_signs.append("Fairly smooth edges")

    # Unnatural colors
    if "color_std" in image_features:
        if image_features["color_std"] > 50:
            ps_score += 0.2
            ps_signs.append("Highly unnatural color distribution")
        elif image_features["color_std"] > 30:
            ps_score += 0.1
            ps_signs.append("Slightly unnatural color distribution")

    # Inconsistent noise
    if "noise_level" in image_features and "noise_std" in image_features:
        noise_ratio = image_features["noise_std"] / max(image_features["noise_level"], 0.001)
        if noise_ratio < 0.5:
            ps_score += 0.2
            ps_signs.append("Unnatural noise distribution")
        elif noise_ratio < 0.7:
            ps_score += 0.1
            ps_signs.append("Slightly unnatural noise distribution")

    # Unnatural frequency distribution
    if "freq_ratio" in image_features:
        if image_features["freq_ratio"] < 0.2:
            ps_score += 0.2
            ps_signs.append("Unnatural frequency distribution, possibly over-blurred")
        elif image_features["freq_ratio"] > 2.0:
            ps_score += 0.2
            ps_signs.append("Unnatural frequency distribution, possibly over-sharpened")

    return min(ps_score, 1.0), ps_signs

def get_detailed_analysis(ai_probability, ps_score, beauty_score, ps_signs, ai_signs, beauty_signs, valid_models_count):
    """Build a more detailed, human-readable analysis result."""
    # Adjust the confidence wording based on how many models produced a valid result
    confidence_prefix = ""
    if valid_models_count >= 3:
        confidence_prefix = "Very high confidence: "
    elif valid_models_count == 2:
        confidence_prefix = "High confidence: "
    elif valid_models_count == 1:
        confidence_prefix = "Medium confidence: "

    # Threshold-based verdict, taking beauty filtering and clothing details into account
    if ai_probability > 0.7:  # high AI probability
        category = confidence_prefix + "Very likely AI-generated"
        description = "The image is most likely fully AI-generated, with almost no traits of a real photo."
    elif ai_probability > 0.5:  # medium AI probability
        if beauty_score > 0.6:  # high beauty-filter score
            category = confidence_prefix + "Possibly a real photo with heavy beauty filtering"
            description = "The image may be a real photo with heavy beauty filtering, but it could also be AI-generated."
        elif ps_score > 0.5:  # high retouching score
            category = confidence_prefix + "Moderately likely AI-generated, very likely heavily retouched"
            description = "The image may be a heavily post-processed real photo, or an AI-generated image that was retouched."
        else:
            category = confidence_prefix + "Moderately likely AI-generated"
            description = "The image shows many AI-generation traits but also retains some characteristics of a real photo."
    elif ai_probability > 0.3:  # low AI probability
        if beauty_score > 0.5:  # medium-high beauty-filter score
            category = confidence_prefix + "Very likely a beauty-filtered real photo"
            description = "The image is most likely a real photo with clearly visible beauty-filter processing."
        elif ps_score > 0.5:  # high retouching score
            category = confidence_prefix + "Unlikely AI-generated, very likely heavily retouched"
            description = "The image is more likely a heavily post-processed real photo with clear retouching traces."
        else:
            category = confidence_prefix + "Unlikely AI-generated"
            description = "The image is more likely a real photo, with only a few AI-generation or retouching traits."
    else:  # very low AI probability
        if beauty_score > 0.6:
            category = confidence_prefix + "Real photo, heavily beauty-filtered"
            description = "The image is essentially a real photo, but it has undergone heavy beauty filtering."
        elif ps_score > 0.6:
            category = confidence_prefix + "Real photo, heavily retouched"
            description = "The image is essentially a real photo, but it has been heavily post-processed with clear retouching traces."
        elif ps_score > 0.3 or beauty_score > 0.3:
            category = confidence_prefix + "Real photo, moderately retouched or beauty-filtered"
            description = "The image is a real photo with clear signs of post-processing or beauty filtering."
        elif ps_score > 0.1 or beauty_score > 0.1:
            category = confidence_prefix + "Real photo, lightly retouched or beauty-filtered"
            description = "The image is a real photo with minor post-processing or beauty filtering."
        else:
            category = confidence_prefix + "Very likely a real photo with little to no retouching"
            description = "The image is almost certainly a real photo without significant processing."

    # Concrete retouching traces
    if ps_signs:
        ps_details = "Detected retouching traces: " + ", ".join(ps_signs)
    else:
        ps_details = "No obvious retouching traces detected."

    # AI-generation traits
    if ai_signs:
        ai_details = "Detected AI-generation traits: " + ", ".join(ai_signs)
    else:
        ai_details = "No obvious AI-generation traits detected."

    # Beauty-filter traits
    if beauty_signs:
        beauty_details = "Detected beauty-filter traits: " + ", ".join(beauty_signs)
    else:
        beauty_details = "No obvious beauty-filter traits detected."

    return category, description, ps_details, ai_details, beauty_details


def detect_ai_image(image):
    """Main detection entry point."""
    if image is None:
        return {"error": "No image provided"}

    results = {}
    valid_models = 0
    weighted_ai_probability = 0

    # Run every available model
    for key, model_info in models.items():
        if model_info["processor"] is not None and model_info["model"] is not None:
            try:
                # Preprocess the image
                inputs = model_info["processor"](images=image, return_tensors="pt")
                with torch.no_grad():
                    outputs = model_info["model"](**inputs)

                # Class probabilities
                probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)

                # Map the model-specific output to an AI probability
                ai_probability = process_model_output(model_info, outputs, probabilities)

                # Record the per-model result
                predicted_class_idx = outputs.logits.argmax(-1).item()
                results[key] = {
                    "model_name": model_info["name"],
                    "ai_probability": ai_probability,
                    "predicted_class": model_info["model"].config.id2label[predicted_class_idx]
                }

                # Accumulate the weighted probability
                weighted_ai_probability += ai_probability * model_info["weight"]
                valid_models += 1
            except Exception as e:
                results[key] = {
                    "model_name": model_info["name"],
                    "error": str(e)
                }

    # Final weighted probability, renormalized over the models that actually loaded
    if valid_models > 0:
        final_ai_probability = weighted_ai_probability / sum(
            m["weight"] for m in models.values()
            if m["processor"] is not None and m["model"] is not None
        )
    else:
        return {"error": "All models failed to load"}

    # Low-level image features
    image_features = analyze_image_features(image)

    # AI-specific traits
    ai_feature_score, ai_signs = check_ai_specific_features(image_features)

    # Retouching traces
    ps_score, ps_signs = detect_photoshop_signs(image_features)

    # Beauty-filter traces
    beauty_score, beauty_signs = detect_beauty_filter_signs(image_features)

    # Adjust the AI probability using the heuristic feature scores
    adjusted_probability = final_ai_probability

    # A high AI-feature score raises the floor of the AI probability
    if ai_feature_score > 0.7:
        adjusted_probability = max(adjusted_probability, 0.7)
    elif ai_feature_score > 0.5:
        adjusted_probability = max(adjusted_probability, 0.6)
    elif ai_feature_score > 0.3:
        adjusted_probability = max(adjusted_probability, 0.5)

    # Clothing or extremity anomalies raise the AI probability substantially
    if "clothing_angle_uniformity" in image_features and image_features["clothing_angle_uniformity"] > 0.3:
        adjusted_probability = max(adjusted_probability, 0.7)
    if "extremity_perimeter_area_ratio" in image_features and image_features["extremity_perimeter_area_ratio"] < 0.05:
        adjusted_probability = max(adjusted_probability, 0.7)

    # A high beauty-filter score without strong AI traits lowers the AI probability
    if beauty_score > 0.6 and ai_feature_score < 0.5:
        adjusted_probability = min(adjusted_probability, 0.5)

    # High symmetry hints at AI generation (with reduced weight)
    if "horizontal_symmetry" in image_features and image_features["horizontal_symmetry"] > 0.8:
        adjusted_probability += 0.1
    if "vertical_symmetry" in image_features and image_features["vertical_symmetry"] > 0.8:
        adjusted_probability += 0.1

    # High texture correlation usually indicates AI generation
    if "texture_correlation" in image_features and image_features["texture_correlation"] > 0.95:
        adjusted_probability += 0.1

    # Very low edge density usually indicates AI generation
    if image_features["edge_density"] < 0.01:
        adjusted_probability += 0.1

    # Anomalous face features increase the AI probability
    if "face_skin_std" in image_features and image_features["face_skin_std"] < 10:
        adjusted_probability += 0.2

    # Clamp to the [0, 1] range
    adjusted_probability = min(1.0, max(0.0, adjusted_probability))

    # If the dedicated AI detector (umm-maybe/AI-image-detector) disagrees strongly
    # with the adjusted value, give its prediction extra weight
    if "model1" in results and "ai_probability" in results["model1"]:
        ai_detector_prob = results["model1"]["ai_probability"]
        if abs(ai_detector_prob - adjusted_probability) > 0.3:
            adjusted_probability = (adjusted_probability + ai_detector_prob * 2) / 3

    # Detailed analysis
    category, description, ps_details, ai_details, beauty_details = get_detailed_analysis(
        adjusted_probability, ps_score, beauty_score,
        ps_signs, ai_signs, beauty_signs, valid_models
    )

    # Build the final result
    final_result = {
        "ai_probability": adjusted_probability,
        "original_ai_probability": final_ai_probability,
        "ps_score": ps_score,
        "beauty_score": beauty_score,
        "ai_feature_score": ai_feature_score,
        "category": category,
        "description": description,
        "ps_details": ps_details,
        "ai_details": ai_details,
        "beauty_details": beauty_details,
        "individual_model_results": results,
        "features": image_features
    }

    return final_result
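
# --- Optional convenience wrapper (our addition, not wired into the UI). ---
# Condenses the JSON result into a short text summary; the keys it reads are
# exactly the ones detect_ai_image() returns above.
def _summarize_result(result):
    if "error" in result:
        return result["error"]
    lines = [
        f"{result['category']} (AI probability: {result['ai_probability']:.2f})",
        result["description"],
        result["ai_details"],
        result["ps_details"],
        result["beauty_details"],
    ]
    return "\n".join(lines)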

# Create the Gradio interface
iface = gr.Interface(
    fn=detect_ai_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.JSON(),
    title="Enhanced AI Image Detection API",
    description=(
        "Multi-model ensemble that detects whether an image is AI-generated, and also analyzes "
        "retouching and beauty-filter traces, paying special attention to details such as clothing and hands."
    ),
    examples=None,
    allow_flagging="never"
)

iface.launch()
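
# Local sanity check (a sketch; the file path below is hypothetical). Since
# iface.launch() blocks, run this in a separate session or before launching:
#
#   result = detect_ai_image(Image.open("sample.jpg"))
#   print(_summarize_result(result))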