Update app.py
app.py CHANGED
@@ -7,17 +7,23 @@ from PIL import Image
 # Ensure using GPU if available
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-# Load the model and processor
+# Load the first model and processor
+image_processor_1 = AutoImageProcessor.from_pretrained("haywoodsloan/ai-image-detector-deploy")
+model_1 = Swinv2ForImageClassification.from_pretrained("haywoodsloan/ai-image-detector-deploy")
+model_1 = model_1.to(device)
+clf_1 = pipeline(model=model_1, task="image-classification", image_processor=image_processor_1, device=device)
 
+# Load the second model
+hfUser = "Heem2"
+modelName = "AI-vs-Real-Image-Detection"
+clf_2 = pipeline("image-classification", model=f"{hfUser}/{modelName}")
+
+# Define class names for both models
+class_names_1 = ['artificial', 'real']
+class_names_2 = ['artificial', 'real']  # Adjust if the second model has different classes
 
 def predict_image(img, confidence_threshold):
+    # Ensure the image is a PIL Image
     if not isinstance(img, Image.Image):
         raise ValueError(f"Expected a PIL Image, but got {type(img)}")
 
@@ -30,33 +36,63 @@ def predict_image(img, confidence_threshold):
     # Resize the image
     img_pil = transforms.Resize((256, 256))(img_pil)
 
-        return f"⚠️ AI Generated Image, Confidence: {result['artificial']:.4f}"
-    elif result['real'] >= confidence_threshold:
-        return f"✅ Real Photo, Confidence: {result['real']:.4f}"
-    else:
-        return "🤷♂️ Uncertain, not confident enough to call."
+    # Predict using the first model
+    try:
+        prediction_1 = clf_1(img_pil)
+        result_1 = {pred['label']: pred['score'] for pred in prediction_1}
+
+        # Ensure the result dictionary contains all class names
+        for class_name in class_names_1:
+            if class_name not in result_1:
+                result_1[class_name] = 0.0
+
+        # Check if either class meets the confidence threshold
+        if result_1['artificial'] >= confidence_threshold:
+            label_1 = f"Label: artificial, Confidence: {result_1['artificial']:.4f}"
+        elif result_1['real'] >= confidence_threshold:
+            label_1 = f"Label: real, Confidence: {result_1['real']:.4f}"
+        else:
+            label_1 = "Uncertain Classification"
+    except Exception as e:
+        label_1 = f"Error: {str(e)}"
 
+    # Predict using the second model
+    try:
+        prediction_2 = clf_2(img_pil)
+        result_2 = {pred['label']: pred['score'] for pred in prediction_2}
+
+        # Ensure the result dictionary contains all class names
+        for class_name in class_names_2:
+            if class_name not in result_2:
+                result_2[class_name] = 0.0
+
+        # Check if either class meets the confidence threshold
+        if result_2['artificial'] >= confidence_threshold:
+            label_2 = f"Label: artificial, Confidence: {result_2['artificial']:.4f}"
+        elif result_2['real'] >= confidence_threshold:
+            label_2 = f"Label: real, Confidence: {result_2['real']:.4f}"
+        else:
+            label_2 = "Uncertain Classification"
+    except Exception as e:
+        label_2 = f"Error: {str(e)}"
 
+    # Combine results
+    combined_results = {
+        "SwinV2": label_1,
+        "AI-vs-Real-Image-Detection": label_2
+    }
 
+    return combined_results
+
 # Define the Gradio interface
 image = gr.Image(label="Image to Analyze", sources=['upload'], type='pil')  # Ensure the image type is PIL
 confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
-label = gr.
+label = gr.JSON(label="Model Predictions")
 
 gr.Interface(
     fn=predict_image,
     inputs=[image, confidence_slider],
     outputs=label,
-    title="AI Generated Classification"
+    title="AI Generated Classification",
+    queue=True  # Enable queuing to handle multiple predictions efficiently
 ).launch()
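For reference, a minimal standalone sketch (not part of the commit) of the per-model step the new predict_image performs: running one of the Hugging Face image-classification pipelines on a PIL image, converting the predictions to a label-to-score dict, and thresholding it. The model id is taken from the diff above; the local file name "sample.jpg" and the 0.5 threshold are assumptions.

# Standalone sketch: one model's prediction step, outside the Gradio app.
from PIL import Image
from transformers import pipeline

clf = pipeline("image-classification", model="Heem2/AI-vs-Real-Image-Detection")

img = Image.open("sample.jpg")  # hypothetical local test image
preds = clf(img)  # e.g. [{'label': 'artificial', 'score': 0.91}, {'label': 'real', 'score': 0.09}]
result = {p["label"]: p["score"] for p in preds}

threshold = 0.5
if result.get("artificial", 0.0) >= threshold:
    print(f"Label: artificial, Confidence: {result['artificial']:.4f}")
elif result.get("real", 0.0) >= threshold:
    print(f"Label: real, Confidence: {result['real']:.4f}")
else:
    print("Uncertain Classification")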