import spaces
import gradio as gr
from transformers import pipeline, AutoImageProcessor, Swinv2ForImageClassification, AutoModelForImageClassification
from torchvision import transforms
import torch
from PIL import Image
import warnings
import numpy as np

# Suppress warnings
warnings.filterwarnings("ignore", category=UserWarning, message="Using a slow image processor as `use_fast` is unset")

# Ensure using GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the first model and processor
image_processor_1 = AutoImageProcessor.from_pretrained("haywoodsloan/ai-image-detector-deploy", use_fast=True)
model_1 = Swinv2ForImageClassification.from_pretrained("haywoodsloan/ai-image-detector-deploy")
model_1 = model_1.to(device)
clf_1 = pipeline(model=model_1, task="image-classification", image_processor=image_processor_1, device=device)

# Load the second model
model_2_path = "Heem2/AI-vs-Real-Image-Detection"
clf_2 = pipeline("image-classification", model=model_2_path, device=device)

# Load the third and fourth models directly so their raw logits can be run through softmax
model_paths = ["Organika/sdxl-detector", "cmckinle/sdxl-flux-detector"]
image_processor_3 = AutoImageProcessor.from_pretrained(model_paths[0])
model_3 = AutoModelForImageClassification.from_pretrained(model_paths[0]).to(device)
image_processor_4 = AutoImageProcessor.from_pretrained(model_paths[1])
model_4 = AutoModelForImageClassification.from_pretrained(model_paths[1]).to(device)

# Define class names for all models
class_names_1 = ['artificial', 'real']
class_names_2 = ['AI Image', 'Real Image']
class_names_3 = ['AI', 'Real']
class_names_4 = ['AI', 'Real']

def softmax(vector):
    e = np.exp(vector - np.max(vector))  # for numerical stability
    return e / e.sum()
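
# Quick sanity check (illustrative, not part of the app's flow):
# softmax(np.array([2.0, 0.5])) -> approximately [0.818, 0.182],
# i.e. raw logits become probabilities that sum to 1.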

@spaces.GPU(duration=10)
def predict_image(img, confidence_threshold):
    # Ensure the image is a PIL Image
    if not isinstance(img, Image.Image):
        raise ValueError(f"Expected a PIL Image, but got {type(img)}")
    
    # Convert the image to RGB if not already
    if img.mode != 'RGB':
        img_pil = img.convert('RGB')
    else:
        img_pil = img
    
    # Resize the image
    img_pil = transforms.Resize((256, 256))(img_pil)
    
    # Predict using the first model
    try:
        prediction_1 = clf_1(img_pil)
        result_1 = {pred['label']: pred['score'] for pred in prediction_1}
        
        # Ensure the result dictionary contains all class names
        for class_name in class_names_1:
            if class_name not in result_1:
                result_1[class_name] = 0.0
        
        # Check if either class meets the confidence threshold
        if result_1['artificial'] >= confidence_threshold:
            label_1 = f"Label: artificial, Confidence: {result_1['artificial']:.4f}"
        elif result_1['real'] >= confidence_threshold:
            label_1 = f"Label: real, Confidence: {result_1['real']:.4f}"
        else:
            label_1 = "Uncertain Classification"
    except Exception as e:
        label_1 = f"Error: {str(e)}"
    
    # Predict using the second model
    try:
        prediction_2 = clf_2(img_pil)
        result_2 = {pred['label']: pred['score'] for pred in prediction_2}
        
        # Ensure the result dictionary contains all class names
        for class_name in class_names_2:
            if class_name not in result_2:
                result_2[class_name] = 0.0
        
        # Check if either class meets the confidence threshold
        if result_2['AI Image'] >= confidence_threshold:
            label_2 = f"Label: AI Image, Confidence: {result_2['AI Image']:.4f}"
        elif result_2['Real Image'] >= confidence_threshold:
            label_2 = f"Label: Real Image, Confidence: {result_2['Real Image']:.4f}"
        else:
            label_2 = "Uncertain Classification"
    except Exception as e:
        label_2 = f"Error: {str(e)}"
    
    # Predict using the third model directly (raw logits -> softmax probabilities)
    try:
        with torch.no_grad():
            inputs_3 = image_processor_3(img_pil, return_tensors="pt").to(device)
            logits_3 = model_3(**inputs_3).logits
            probabilities = softmax(logits_3.cpu().numpy()[0])
            # Index-based mapping assumes the model's label order matches class_names_3
            result_3 = {class_names_3[idx]: float(probabilities[idx]) for idx in range(len(class_names_3))}
        
        # Ensure the result dictionary contains all class names
        for class_name in class_names_3:
            if class_name not in result_3:
                result_3[class_name] = 0.0
        
        # Check if either class meets the confidence threshold
        if result_3['AI'] >= confidence_threshold:
            label_3 = f"Label: AI, Confidence: {result_3['AI']:.4f}"
        elif result_3['Real'] >= confidence_threshold:
            label_3 = f"Label: Real, Confidence: {result_3['Real']:.4f}"
        else:
            label_3 = "Uncertain Classification"
    except Exception as e:
        label_3 = f"Error: {str(e)}"
    
    # Predict using the fourth model directly (raw logits -> softmax probabilities)
    try:
        with torch.no_grad():
            inputs_4 = image_processor_4(img_pil, return_tensors="pt").to(device)
            logits_4 = model_4(**inputs_4).logits
            probabilities = softmax(logits_4.cpu().numpy()[0])
            # Index-based mapping assumes the model's label order matches class_names_4
            result_4 = {class_names_4[idx]: float(probabilities[idx]) for idx in range(len(class_names_4))}
        
        # Ensure the result dictionary contains all class names
        for class_name in class_names_4:
            if class_name not in result_4:
                result_4[class_name] = 0.0
        
        # Check if either class meets the confidence threshold
        if result_4['AI'] >= confidence_threshold:
            label_4 = f"Label: AI, Confidence: {result_4['AI']:.4f}"
        elif result_4['Real'] >= confidence_threshold:
            label_4 = f"Label: Real, Confidence: {result_4['Real']:.4f}"
        else:
            label_4 = "Uncertain Classification"
    except Exception as e:
        label_4 = f"Error: {str(e)}"
    
    # Combine results
    combined_results = {
        "SwinV2": label_1,
        "AI-vs-Real-Image-Detection": label_2,
        "Organika/sdxl-detector": label_3,
        "cmckinle/sdxl-flux-detector": label_4
    }
    
    return combined_results

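# Example of the returned dictionary (illustrative values), rendered by the JSON output below:
# {
#     "SwinV2": "Label: artificial, Confidence: 0.9876",
#     "AI-vs-Real-Image-Detection": "Uncertain Classification",
#     "Organika/sdxl-detector": "Label: AI, Confidence: 0.7321",
#     "cmckinle/sdxl-flux-detector": "Label: Real, Confidence: 0.6543"
# }
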
# Define the Gradio interface
image = gr.Image(label="Image to Analyze", sources=['upload'], type='pil')  # Ensure the image type is PIL
confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
label = gr.JSON(label="Model Predictions")

# Launch the interface
iface = gr.Interface(
    fn=predict_image,
    inputs=[image, confidence_slider],
    outputs=label,
    title="AI Generated Classification"
)
iface.launch()
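
# Quick local smoke test (illustrative; "sample.jpg" is a placeholder path, not part of the app):
# from PIL import Image
# print(predict_image(Image.open("sample.jpg"), confidence_threshold=0.5))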