alfakat committed · Commit 903d9dc · verified · 1 Parent(s): 1991389

Upload 5 files

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ examples/image00.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/image01.jpg filter=lfs diff=lfs merge=lfs -text
+ examples/image02.JPG filter=lfs diff=lfs merge=lfs -text
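
These three rules route the example images through Git LFS. For context, this is exactly the form `git lfs track` appends to .gitattributes; the new rules could have been produced with commands like the following (shown for illustration only, the actual commands are not recorded in the commit):

git lfs track "examples/image00.jpg"
git lfs track "examples/image01.jpg"
git lfs track "examples/image02.JPG"
git add .gitattributes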
examples/image00.jpg ADDED

Git LFS Details

  • SHA256: 0711631f28e68a4744d044d107857580dbb8c0dddc456d94d24c1601ab40943e
  • Pointer size: 131 Bytes
  • Size of remote file: 103 kB
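
The "Pointer size" above refers to the small text pointer Git stores in place of the image bytes. Per the Git LFS pointer spec, the 131-byte pointer for this file would look roughly like the sketch below; the exact byte count is not shown on this page, so <size in bytes> is a placeholder:

version https://git-lfs.github.com/spec/v1
oid sha256:0711631f28e68a4744d044d107857580dbb8c0dddc456d94d24c1601ab40943e
size <size in bytes>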
examples/image01.jpg ADDED

Git LFS Details

  • SHA256: 296b02738cd1ac9846ee0f30c049cd70d1896fb05608cb254ddec7e4327c9447
  • Pointer size: 132 Bytes
  • Size of remote file: 2.41 MB
examples/image02.JPG ADDED

Git LFS Details

  • SHA256: 0528a8b66848cff62693b01179298675e8481aaf646a917e5b72e512ded90820
  • Pointer size: 132 Bytes
  • Size of remote file: 3.32 MB
requirements ADDED
@@ -0,0 +1,4 @@
+ torch>=2.0.0
+ transformers>=4.40.0
+ gradio>=4.18.0
+ Pillow
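
Note that the file is named requirements rather than the conventional requirements.txt, so pip must be pointed at it explicitly. Installing the dependencies would look like this (assuming pip is available in the target environment):

pip install -r requirements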
run_realify.py ADDED
@@ -0,0 +1,57 @@
+ import torch
+ from PIL import Image
+ import gradio as gr
+ from transformers import ViTForImageClassification, ViTImageProcessor, CLIPProcessor, CLIPModel
+
+ """Load models"""
+ # The model path can be replaced with a local one
+ deepfake_model = ViTForImageClassification.from_pretrained("prithivMLmods/Deep-Fake-Detector-v2-Model")
+ deepfake_processor = ViTImageProcessor.from_pretrained("prithivMLmods/Deep-Fake-Detector-v2-Model")
+ clip_model_path = "openai/clip-vit-base-patch32"
+
+ clip_model = CLIPModel.from_pretrained(clip_model_path)
+ clip_processor = CLIPProcessor.from_pretrained(clip_model_path)
+ clip_model.eval()
+ deepfake_model.eval()
+
+ clip_labels = ["photo of a real person or scene", "synthetic or AI generated image"]  # index 0 = real, index 1 = generated
+
+
+ def hybrid_classifier(img: Image.Image):
+     df_inputs = deepfake_processor(images=img, return_tensors="pt")  # ViT deepfake-detector pass
+     with torch.no_grad():
+         df_outputs = deepfake_model(**df_inputs)
+     df_probs = torch.nn.functional.softmax(df_outputs.logits, dim=1)
+     df_real_score = df_probs[0][deepfake_model.config.label2id["Realism"]].item()
+     df_fake_score = df_probs[0][deepfake_model.config.label2id["Deepfake"]].item()
+
+     clip_inputs = clip_processor(text=clip_labels, images=img, return_tensors="pt", padding=True)  # CLIP zero-shot pass
+     with torch.no_grad():
+         clip_outputs = clip_model(**clip_inputs)
+     clip_probs = torch.softmax(clip_outputs.logits_per_image, dim=1).squeeze()
+     clip_real_score = clip_probs[0].item()
+     clip_fake_score = clip_probs[1].item()
+
+     final_real_score = (0.3 * df_real_score + 0.7 * clip_real_score)  # weighted ensemble: CLIP 0.7, ViT 0.3
+     final_fake_score = (0.3 * df_fake_score + 0.7 * clip_fake_score)
+
+     decision = "Generated"
+     if abs(final_real_score - final_fake_score) > 0.3:  # require a decisive margin before calling it "Real"
+         decision = "Real" if final_real_score > final_fake_score else decision
+
+     return decision
+
+
+ """Set up and launch Gradio"""
+ iface = gr.Interface(
+     fn=hybrid_classifier,
+     inputs=gr.Image(type="pil"),
+     outputs=gr.Textbox(label="Final Decision"),
+     title="RealifyAI",
+     description="Figure out whether an image is real or generated",
+     examples=[["examples/image00.jpg"],
+               ["examples/image01.jpg"],
+               ["examples/image02.JPG"]])  # .JPG matches the uploaded file's extension
+
+ if __name__ == "__main__":
+     iface.launch()
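
For a quick check outside the web UI, hybrid_classifier can also be called directly. A minimal sketch, assuming the script above is saved as run_realify.py and run from the repository root (note that importing the module loads both models up front):

from PIL import Image
from run_realify import hybrid_classifier

img = Image.open("examples/image00.jpg").convert("RGB")
print(hybrid_classifier(img))  # prints "Real" or "Generated"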