LPX committed · Commit 73ffe6a · 1 parent: 5268185

feat: simple prediction

Files changed:
- app_mcp.py (+19 -9)
- requirements.txt (+1 -0)
app_mcp.py
CHANGED
@@ -3,6 +3,8 @@ import time
 from typing import Literal
 import spaces
 import gradio as gr
+from gradio_client import Client, handle_file
+
 from transformers import pipeline, AutoImageProcessor, SwinForImageClassification, Swinv2ForImageClassification, AutoFeatureExtractor, AutoModelForImageClassification
 from torchvision import transforms
 import torch

@@ -90,7 +92,6 @@ MODEL_PATHS = {
     "model_3": "Organika/sdxl-detector",
     "model_4": "cmckinle/sdxl-flux-detector_v1.1",
     "model_5": "prithivMLmods/Deep-Fake-Detector-v2-Model",
-    "model_5b": "prithivMLmods/Deepfake-Detection-Exp-02-22",
     "model_6": "ideepankarsharma2003/AI_ImageClassification_MidjourneyV6_SDXL",
     "model_7": "date3k2/vit-real-fake-classification-v4"
 }

@@ -101,7 +102,6 @@ CLASS_NAMES = {
     "model_3": ['AI', 'Real'],
     "model_4": ['AI', 'Real'],
     "model_5": ['Realism', 'Deepfake'],
-    "model_5b": ['Real', 'Deepfake'],
     "model_6": ['ai_gen', 'human'],
     "model_7": ['Fake', 'Real'],

@@ -196,11 +196,6 @@ register_model_with_metadata(
     display_name="Vit Based", contributor="prithivMLmods", model_path=MODEL_PATHS["model_5"]
 )

-clf_5b = pipeline("image-classification", model=MODEL_PATHS["model_5b"], device=device)
-register_model_with_metadata(
-    "model_5b", clf_5b, preprocess_resize_224, postprocess_pipeline, CLASS_NAMES["model_5b"],
-    display_name="Vit Based, Newer Dataset", contributor="prithivMLmods", model_path=MODEL_PATHS["model_5b"]
-)

 image_processor_6 = AutoImageProcessor.from_pretrained(MODEL_PATHS["model_6"], use_fast=True)
 model_6 = SwinForImageClassification.from_pretrained(MODEL_PATHS["model_6"]).to(device)

@@ -252,7 +247,7 @@ def infer(image: Image.Image, model_id: str, confidence_threshold: float = 0.75)

 def predict_image(img, confidence_threshold):
     model_ids = [
-        "model_1", "model_2", "model_3", "model_4", "model_5", "
+        "model_1", "model_2", "model_3", "model_4", "model_5", "model_6", "model_7"
     ]
     results = [infer(img, model_id, confidence_threshold) for model_id in model_ids]
     return img, results

@@ -488,6 +483,14 @@ def predict_with_ensemble(img, confidence_threshold, augment_methods, rotate_deg

     return img_pil, cleaned_forensics_images, table_rows, json_results, consensus_html

+def simple_prediction(img):
+    client = Client("aiwithoutborders-xyz/OpenSight-Community-Forensics-Preview")
+    result = client.predict(
+        input_image=handle_file(img),
+        api_name="/simple_predict"
+    )
+    return result
+
 with gr.Blocks(css="#post-gallery { overflow: hidden !important;} .grid-wrap{ overflow-y: hidden !important;} .ms-gr-ant-welcome-icon{ height:unset !important;} .tabs{margin-top:10px;}") as demo:

     with gr.Tab("👀 Detection Models Eval / Playground"):

@@ -553,7 +556,14 @@ with gr.Blocks(css="#post-gallery { overflow: hidden !important;} .grid-wrap{ ov
         gr.load("aiwithoutborders-xyz/OpenSight-Community-Forensics-Preview", src="spaces")
     with gr.Tab("🥇 Leaderboard"):
         gr.Markdown("# AI Generated / Deepfake Detection Models Leaderboard: Soon™")
-
+    with gr.Tab("Simple Predict", visible=False):
+        gr.Interface(
+            fn=simple_prediction,
+            inputs=gr.Image(type="filepath"),
+            outputs=gr.Text(),
+            title="Simple and Fast Prediction",
+            description=""
+        )
     with gr.Tab("Wavelet Blocking Noise Estimation", visible=False):
         gr.Interface(
             fn=wavelet_blocking_noise_estimation,
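For context, the new simple_prediction() helper is a thin wrapper around the gradio_client API: it forwards the uploaded image to the aiwithoutborders-xyz/OpenSight-Community-Forensics-Preview Space and returns whatever its /simple_predict endpoint replies with. A minimal standalone sketch of the same call, where the local file path is only a placeholder:

from gradio_client import Client, handle_file

# Same remote call that simple_prediction() makes inside app_mcp.py.
client = Client("aiwithoutborders-xyz/OpenSight-Community-Forensics-Preview")
result = client.predict(
    input_image=handle_file("example.jpg"),  # placeholder path to a local image
    api_name="/simple_predict"
)
print(result)  # the app surfaces this value through a gr.Text() output
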
requirements.txt
CHANGED
@@ -1,5 +1,6 @@
 gradio[mcp]
 gradio_leaderboard
+gradio_client
 transformers
 huggingface_hub[hf_xet]
 torchvision
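The new gradio_client entry supplies the Client and handle_file names that app_mcp.py now imports. A quick sanity check after installing the updated requirements (for example with pip install -r requirements.txt):

# Confirms the new dependency resolves; these are the exact names app_mcp.py imports.
from gradio_client import Client, handle_file

print(Client, handle_file)  # raises ImportError if gradio_client is missing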