import spaces
import gradio as gr
from transformers import pipeline, AutoImageProcessor, Swinv2ForImageClassification, AutoFeatureExtractor, AutoModelForImageClassification
from torchvision import transforms
import torch
from PIL import Image
import warnings
import numpy as np
from utils.goat import call_inference
import io
# Suppress warnings
warnings.filterwarnings("ignore", category=UserWarning, message="Using a slow image processor as `use_fast` is unset")
# Ensure using GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the first model and processor
image_processor_1 = AutoImageProcessor.from_pretrained("haywoodsloan/ai-image-detector-deploy", use_fast=True)
model_1 = Swinv2ForImageClassification.from_pretrained("haywoodsloan/ai-image-detector-deploy")
model_1 = model_1.to(device)
clf_1 = pipeline(model=model_1, task="image-classification", image_processor=image_processor_1, device=device)
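# Note: an image-classification pipeline returns a list of
# {'label': ..., 'score': ...} dicts, one entry per class; the per-model
# result dicts inside predict_image below are built from that structure.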
# Load the second model
model_2_path = "Heem2/AI-vs-Real-Image-Detection"
clf_2 = pipeline("image-classification", model=model_2_path, device=device)
# Load additional models
models = ["Organika/sdxl-detector", "cmckinle/sdxl-flux-detector"]
# Load the third and fourth models
feature_extractor_3 = AutoFeatureExtractor.from_pretrained(models[0])
model_3 = AutoModelForImageClassification.from_pretrained(models[0]).to(device)
feature_extractor_4 = AutoFeatureExtractor.from_pretrained(models[1])
model_4 = AutoModelForImageClassification.from_pretrained(models[1]).to(device)
# Define class names for all models
class_names_1 = ['artificial', 'real']
class_names_2 = ['AI Image', 'Real Image']
labels_3 = ['AI', 'Real']
labels_4 = ['AI', 'Real']
def softmax(vector):
e = np.exp(vector - np.max(vector)) # for numerical stability
return e / e.sum()
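# Illustrative check: softmax(np.array([0.0, 0.0])) -> array([0.5, 0.5]);
# the outputs are always non-negative and sum to 1.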
def convert_pil_to_bytes(image, format='JPEG'):
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format=format)
    return img_byte_arr.getvalue()
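# Illustrative usage: convert_pil_to_bytes(Image.new('RGB', (8, 8))) returns a
# JPEG-encoded bytes object suitable for use as an HTTP request body.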
@spaces.GPU(duration=10)
def predict_image(img, confidence_threshold):
# Ensure the image is a PIL Image
if not isinstance(img, Image.Image):
raise ValueError(f"Expected a PIL Image, but got {type(img)}")
# Convert the image to RGB if not already
if img.mode != 'RGB':
img_pil = img.convert('RGB')
else:
img_pil = img
# Resize the image
img_pil = transforms.Resize((256, 256))(img_pil)
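    # Note: each downstream image processor resizes again to its own model's
    # expected input size; this pre-resize mainly bounds very large uploads
    # (it also squashes the aspect ratio to a 256x256 square).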
# Predict using the first model
try:
prediction_1 = clf_1(img_pil)
result_1 = {pred['label']: pred['score'] for pred in prediction_1}
print(result_1)
# Ensure the result dictionary contains all class names
for class_name in class_names_1:
if class_name not in result_1:
result_1[class_name] = 0.0
# Check if either class meets the confidence threshold
if result_1['artificial'] >= confidence_threshold:
label_1 = f"AI, Confidence: {result_1['artificial']:.4f}"
elif result_1['real'] >= confidence_threshold:
label_1 = f"Real, Confidence: {result_1['real']:.4f}"
else:
label_1 = "Uncertain Classification"
except Exception as e:
label_1 = f"Error: {str(e)}"
# Predict using the second model
try:
prediction_2 = clf_2(img_pil)
result_2 = {pred['label']: pred['score'] for pred in prediction_2}
print(result_2)
# Ensure the result dictionary contains all class names
for class_name in class_names_2:
if class_name not in result_2:
result_2[class_name] = 0.0
# Check if either class meets the confidence threshold
if result_2['AI Image'] >= confidence_threshold:
label_2 = f"AI, Confidence: {result_2['AI Image']:.4f}"
elif result_2['Real Image'] >= confidence_threshold:
label_2 = f"Real, Confidence: {result_2['Real Image']:.4f}"
else:
label_2 = "Uncertain Classification"
except Exception as e:
label_2 = f"Error: {str(e)}"
# Predict using the third model with softmax
try:
inputs_3 = feature_extractor_3(img_pil, return_tensors="pt").to(device)
with torch.no_grad():
outputs_3 = model_3(**inputs_3)
logits_3 = outputs_3.logits
probabilities_3 = softmax(logits_3.cpu().numpy()[0])
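        # Assumption: the index-to-label mapping below takes index 0 as AI and
        # index 1 as Real; verify against model_3.config.id2label if results
        # look inverted.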
result_3 = {
labels_3[0]: float(probabilities_3[0]), # AI
labels_3[1]: float(probabilities_3[1]) # Real
}
print(result_3)
# Ensure the result dictionary contains all class names
for class_name in labels_3:
if class_name not in result_3:
result_3[class_name] = 0.0
# Check if either class meets the confidence threshold
if result_3['AI'] >= confidence_threshold:
label_3 = f"AI, Confidence: {result_3['AI']:.4f}"
elif result_3['Real'] >= confidence_threshold:
label_3 = f"Real, Confidence: {result_3['Real']:.4f}"
else:
label_3 = "Uncertain Classification"
except Exception as e:
label_3 = f"Error: {str(e)}"
# Predict using the fourth model with softmax
try:
inputs_4 = feature_extractor_4(img_pil, return_tensors="pt").to(device)
with torch.no_grad():
outputs_4 = model_4(**inputs_4)
logits_4 = outputs_4.logits
probabilities_4 = softmax(logits_4.cpu().numpy()[0])
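        # Assumption: same index-to-label ordering as model 3 (0 -> AI,
        # 1 -> Real); verify against model_4.config.id2label if in doubt.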
result_4 = {
labels_4[0]: float(probabilities_4[0]), # AI
labels_4[1]: float(probabilities_4[1]) # Real
}
print(result_4)
# Ensure the result dictionary contains all class names
for class_name in labels_4:
if class_name not in result_4:
result_4[class_name] = 0.0
# Check if either class meets the confidence threshold
if result_4['AI'] >= confidence_threshold:
label_4 = f"AI, Confidence: {result_4['AI']:.4f}"
elif result_4['Real'] >= confidence_threshold:
label_4 = f"Real, Confidence: {result_4['Real']:.4f}"
else:
label_4 = "Uncertain Classification"
except Exception as e:
label_4 = f"Error: {str(e)}"
    # Predict using the fifth model (remote GOAT inference endpoint)
    try:
        img_bytes = convert_pil_to_bytes(img_pil)
        response5_raw = call_inference(img_bytes)
        response5 = response5_raw.json()
        print(response5)
        # Bug fix: label_5 was previously only assigned in the except branch,
        # so a successful call crashed at combined_results. Pass the parsed
        # payload through, since its schema isn't documented in this file.
        label_5 = response5
    except Exception as e:
        label_5 = f"Error: {str(e)}"
# Combine results
combined_results = {
"SwinV2/detect": label_1,
"ViT/AI-vs-Real": label_2,
"Swin/SDXL": label_3,
"Swin/SDXL-FLUX": label_4,
"GOAT": label_5
}
return combined_results
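# Illustrative direct invocation (assumes a local "sample.jpg"; the Gradio UI
# below normally supplies the PIL image and threshold):
#   results = predict_image(Image.open("sample.jpg"), confidence_threshold=0.5)
#   print(results)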
# Define the Gradio interface
image = gr.Image(label="Image to Analyze", sources=['upload'], type='pil') # Ensure the image type is PIL
confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
label = gr.JSON(label="Model Predictions")
# Launch the interface
iface = gr.Interface(
fn=predict_image,
inputs=[image, confidence_slider],
outputs=label,
title="AI Generated Classification"
)
iface.launch()