import spaces
import gradio as gr
from transformers import pipeline, AutoImageProcessor, Swinv2ForImageClassification, AutoFeatureExtractor, AutoModelForImageClassification
from torchvision import transforms
import torch
from PIL import Image
import numpy as np
from utils.goat import call_inference
import io
import warnings
# Suppress warnings
warnings.filterwarnings("ignore", category=UserWarning, message="Using a slow image processor as `use_fast` is unset")
# Use the GPU if available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the first model and processor
image_processor_1 = AutoImageProcessor.from_pretrained("haywoodsloan/ai-image-detector-deploy", use_fast=True)
model_1 = Swinv2ForImageClassification.from_pretrained("haywoodsloan/ai-image-detector-deploy")
model_1 = model_1.to(device)
clf_1 = pipeline(model=model_1, task="image-classification", image_processor=image_processor_1, device=device)
# Load the second model
model_2_path = "Heem2/AI-vs-Real-Image-Detection"
clf_2 = pipeline("image-classification", model=model_2_path, device=device)
# Load additional models
models = ["Organika/sdxl-detector", "cmckinle/sdxl-flux-detector"]
feature_extractor_3 = AutoFeatureExtractor.from_pretrained(models[0])
model_3 = AutoModelForImageClassification.from_pretrained(models[0]).to(device)
feature_extractor_4 = AutoFeatureExtractor.from_pretrained(models[1])
model_4 = AutoModelForImageClassification.from_pretrained(models[1]).to(device)
# Define class names for all models
class_names_1 = ['artificial', 'real']
class_names_2 = ['AI Image', 'Real Image']
labels_3 = ['AI', 'Real']
labels_4 = ['AI', 'Real']
def softmax(vector):
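    """Compute a numerically stable softmax over a 1-D array of logits."""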
e = np.exp(vector - np.max(vector)) # for numerical stability
return e / e.sum()
def convert_pil_to_bytes(image, format='JPEG'):
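    """Serialize a PIL image to raw bytes in the given format."""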
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format=format)
img_byte_arr = img_byte_arr.getvalue()
return img_byte_arr
@spaces.GPU(duration=10)
def predict_image(img, confidence_threshold):
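    """Run the uploaded image through all five detectors.

    Returns the resized RGB image and a list of per-model outputs, where each
    entry has the form [model_index, real_score, ai_score, verdict].
    """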
# Ensure the image is a PIL Image
if not isinstance(img, Image.Image):
raise ValueError(f"Expected a PIL Image, but got {type(img)}")
# Convert the image to RGB if not already
if img.mode != 'RGB':
img_pil = img.convert('RGB')
else:
img_pil = img
# Resize the image
img_pil = transforms.Resize((256, 256))(img_pil)
# Predict using the first model
try:
        prediction_1 = clf_1(img_pil)
        result_1 = {pred['label']: pred['score'] for pred in prediction_1}
        # Ensure the result dictionary contains all class names before indexing into it
        for class_name in class_names_1:
            if class_name not in result_1:
                result_1[class_name] = 0.0
        result_1output = [1, result_1['real'], result_1['artificial']]
        print(result_1output)
# Check if either class meets the confidence threshold
if result_1['artificial'] >= confidence_threshold:
label_1 = f"AI, Confidence: {result_1['artificial']:.4f}"
result_1output += ['AI']
elif result_1['real'] >= confidence_threshold:
label_1 = f"Real, Confidence: {result_1['real']:.4f}"
result_1output += ['REAL']
else:
label_1 = "Uncertain Classification"
result_1output += ['UNCERTAIN']
    except Exception as e:
        label_1 = f"Error: {str(e)}"
        result_1output = [1, 0.0, 0.0, 'ERROR']
# Predict using the second model
try:
        prediction_2 = clf_2(img_pil)
        result_2 = {pred['label']: pred['score'] for pred in prediction_2}
        # Ensure the result dictionary contains all class names before indexing into it
        for class_name in class_names_2:
            if class_name not in result_2:
                result_2[class_name] = 0.0
        result_2output = [2, result_2['Real Image'], result_2['AI Image']]
        print(result_2output)
# Check if either class meets the confidence threshold
if result_2['AI Image'] >= confidence_threshold:
label_2 = f"AI, Confidence: {result_2['AI Image']:.4f}"
result_2output += ['AI']
elif result_2['Real Image'] >= confidence_threshold:
label_2 = f"Real, Confidence: {result_2['Real Image']:.4f}"
result_2output += ['REAL']
else:
label_2 = "Uncertain Classification"
result_2output += ['UNCERTAIN']
    except Exception as e:
        label_2 = f"Error: {str(e)}"
        result_2output = [2, 0.0, 0.0, 'ERROR']
# Predict using the third model with softmax
try:
inputs_3 = feature_extractor_3(img_pil, return_tensors="pt").to(device)
with torch.no_grad():
outputs_3 = model_3(**inputs_3)
logits_3 = outputs_3.logits
probabilities_3 = softmax(logits_3.cpu().numpy()[0])
result_3 = {
labels_3[1]: float(probabilities_3[1]), # Real
labels_3[0]: float(probabilities_3[0]) # AI
}
result_3output = [3, float(probabilities_3[1]), float(probabilities_3[0])]
print(result_3output)
# Ensure the result dictionary contains all class names
for class_name in labels_3:
if class_name not in result_3:
result_3[class_name] = 0.0
# Check if either class meets the confidence threshold
if result_3['AI'] >= confidence_threshold:
label_3 = f"AI, Confidence: {result_3['AI']:.4f}"
result_3output += ['AI']
elif result_3['Real'] >= confidence_threshold:
label_3 = f"Real, Confidence: {result_3['Real']:.4f}"
result_3output += ['REAL']
else:
label_3 = "Uncertain Classification"
result_3output += ['UNCERTAIN']
    except Exception as e:
        label_3 = f"Error: {str(e)}"
        result_3output = [3, 0.0, 0.0, 'ERROR']
# Predict using the fourth model with softmax
try:
inputs_4 = feature_extractor_4(img_pil, return_tensors="pt").to(device)
with torch.no_grad():
outputs_4 = model_4(**inputs_4)
logits_4 = outputs_4.logits
probabilities_4 = softmax(logits_4.cpu().numpy()[0])
result_4 = {
labels_4[1]: float(probabilities_4[1]), # Real
labels_4[0]: float(probabilities_4[0]) # AI
}
result_4output = [4, float(probabilities_4[1]), float(probabilities_4[0])]
        print(result_4output)
# Ensure the result dictionary contains all class names
for class_name in labels_4:
if class_name not in result_4:
result_4[class_name] = 0.0
# Check if either class meets the confidence threshold
if result_4['AI'] >= confidence_threshold:
label_4 = f"AI, Confidence: {result_4['AI']:.4f}"
result_4output += ['AI']
elif result_4['Real'] >= confidence_threshold:
label_4 = f"Real, Confidence: {result_4['Real']:.4f}"
result_4output += ['REAL']
else:
label_4 = "Uncertain Classification"
result_4output += ['UNCERTAIN']
    except Exception as e:
        label_4 = f"Error: {str(e)}"
        result_4output = [4, 0.0, 0.0, 'ERROR']
    # Predict using the GOAT model via call_inference
    try:
        result_5output = [5, 0.0, 0.0, 'MAINTENANCE']
        img_bytes = convert_pil_to_bytes(img_pil)
        response5 = call_inference(img)
        print(response5)
        label_5 = f"Result: {response5}"
    except Exception as e:
        label_5 = f"Error: {str(e)}"
# Combine results
combined_results = {
"SwinV2/detect": label_1,
"ViT/AI-vs-Real": label_2,
"Swin/SDXL": label_3,
"Swin/SDXL-FLUX": label_4,
"GOAT": label_5
}
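    # Each per-model entry: [model_index, real_score, ai_score, verdict]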
combined_outputs = [ result_1output, result_2output, result_3output, result_4output, result_5output ]
return img_pil, combined_outputs
# Define a function to generate the HTML content
def generate_results_html(results):
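    """Render the per-model outputs as Bootstrap-styled summary columns and result cards."""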
def get_header_color(label):
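        """Map a verdict label to a Bootstrap contextual background class."""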
if label == 'AI':
return 'bg-danger'
elif label == 'REAL':
return 'bg-success'
elif label == 'UNCERTAIN':
return 'bg-warning'
elif label == 'MAINTENANCE':
return 'bg-info'
else:
return 'bg-secondary'
print(results)
html_content = f"""
<link href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" rel="stylesheet">
<div class="container">
<div class="row mt-4 px-2">
<div class="col">
<h5>SwinV2/detect <span class="badge badge-secondary ml-1">M1</span></h5>
<p>{results[0][3]}</p>
</div>
<div class="col">
<h5>ViT/AI-vs-Real <span class="badge badge-secondary ml-1">M2</span></h5>
<p>{results[1][3]}</p>
</div>
<div class="col">
<h5>Swin/SDXL <span class="badge badge-secondary ml-1">M3</span></h5>
<p>{results[2][3]}</p>
</div>
<div class="col">
<h5>Swin/SDXL-FLUX <span class="badge badge-secondary ml-1">M4</span></h5>
<p>{results[3][3]}</p>
</div>
<div class="col">
<h5>GOAT <span class="badge badge-secondary ml-1">M5</span></h5>
<p>{results[4][3]}</p>
</div>
</div>
<div class="col">
<div class="card-group">
<div class="card">
<div class="card-header {get_header_color(results[0][-1])}" style="height:120px;">
<span class="text-center font-weight-bolder">{results[0][-1]}</span>
</div>
<div class="card-body">
<h5 class="card-title">SwinV2/detect <span class="badge badge-secondary ml-1">M1</span></h5>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {results[0][-3] * 100:.2f}%;" aria-valuenow="{results[0][-3] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-3] * 100:.2f}% (Real)</div>
</div>
<div class="progress">
<div class="progress-bar bg-danger" role="progressbar" style="width: {results[0][-4] * 100:.2f}%;" aria-valuenow="{results[0][-4] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-4] * 100:.2f}% (AI)</div>
</div>
</div>
<div class="card-footer">
<small class="text-muted">model by @haywoodsloan / more info</small>
</div>
</div>
<div class="card">
<div class="card-header {get_header_color(results[0][-1])}" style="height:120px;">
<span class="text-center font-weight-bolder">{results[0][-1]}</span>
</div>
<div class="card-body">
<h5 class="card-title">SwinV2/detect <span class="badge badge-secondary ml-1">M1</span></h5>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {results[0][-3] * 100:.2f}%;" aria-valuenow="{results[0][-3] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-3] * 100:.2f}% (Real)</div>
</div>
<div class="progress">
<div class="progress-bar bg-danger" role="progressbar" style="width: {results[0][-4] * 100:.2f}%;" aria-valuenow="{results[0][-4] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-4] * 100:.2f}% (AI)</div>
</div>
</div>
<div class="card-footer">
<small class="text-muted">model by @haywoodsloan / more info</small>
</div>
</div>
<div class="card">
<div class="card-header {get_header_color(results[0][-1])}" style="height:120px;">
<span class="text-center font-weight-bolder">{results[0][-1]}</span>
</div>
<div class="card-body">
<h5 class="card-title">SwinV2/detect <span class="badge badge-secondary ml-1">M1</span></h5>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {results[0][-3] * 100:.2f}%;" aria-valuenow="{results[0][-3] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-3] * 100:.2f}% (Real)</div>
</div>
<div class="progress">
<div class="progress-bar bg-danger" role="progressbar" style="width: {results[0][-4] * 100:.2f}%;" aria-valuenow="{results[0][-4] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-4] * 100:.2f}% (AI)</div>
</div>
</div>
<div class="card-footer">
<small class="text-muted">model by @haywoodsloan / more info</small>
</div>
</div>
<div class="card">
<div class="card-header {get_header_color(results[0][-1])}" style="height:120px;">
<span class="text-center font-weight-bolder">{results[0][-1]}</span>
</div>
<div class="card-body">
<h5 class="card-title">SwinV2/detect <span class="badge badge-secondary ml-1">M1</span></h5>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {results[0][-3] * 100:.2f}%;" aria-valuenow="{results[0][-3] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-3] * 100:.2f}% (Real)</div>
</div>
<div class="progress">
<div class="progress-bar bg-danger" role="progressbar" style="width: {results[0][-4] * 100:.2f}%;" aria-valuenow="{results[0][-4] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-4] * 100:.2f}% (AI)</div>
</div>
</div>
<div class="card-footer">
<small class="text-muted">model by @haywoodsloan / more info</small>
</div>
</div>
<div class="card">
<div class="card-header {get_header_color(results[0][-1])}" style="height:120px;">
<span class="text-center font-weight-bolder">{results[0][-1]}</span>
</div>
<div class="card-body">
<h5 class="card-title">SwinV2/detect <span class="badge badge-secondary ml-1">M1</span></h5>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {results[0][-3] * 100:.2f}%;" aria-valuenow="{results[0][-3] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-3] * 100:.2f}% (Real)</div>
</div>
<div class="progress">
<div class="progress-bar bg-danger" role="progressbar" style="width: {results[0][-4] * 100:.2f}%;" aria-valuenow="{results[0][-4] * 100:.2f}" aria-valuemin="0" aria-valuemax="100">{results[0][-4] * 100:.2f}% (AI)</div>
</div>
</div>
<div class="card-footer">
<small class="text-muted">model by @haywoodsloan / more info</small>
</div>
</div>
</div>
</div>
</div>
"""
return html_content
# Wrap predict_image so the interface also receives the rendered results HTML
def predict_image_with_html(img, confidence_threshold):
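    """Run all detectors on the image and return the processed image plus the rendered HTML results."""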
img_pil, results = predict_image(img, confidence_threshold)
html_content = generate_results_html(results)
return img_pil, html_content
# Define the Gradio interface
with gr.Blocks() as iface:
gr.Markdown("# AI Generated Image Classification")
with gr.Row():
with gr.Column(scale=2):
image_input = gr.Image(label="Upload Image to Analyze", sources=['upload'], type='pil')
confidence_slider = gr.Slider(0.0, 1.0, value=0.5, step=0.01, label="Confidence Threshold")
inputs = [image_input, confidence_slider]
with gr.Column(scale=3):
image_output = gr.Image(label="Processed Image")
# Custom HTML component to display results in 5 columns
results_html = gr.HTML(label="Model Predictions")
outputs = [image_output, results_html]
gr.Button("Predict").click(fn=predict_image_with_html, inputs=inputs, outputs=outputs)
# Launch the interface
iface.launch()