ashish-001 committed
Commit 6d37900 · verified · 1 Parent(s): ea86353

Upload 5 files
Files changed (6)
  1. .gitattributes +1 -0
  2. Sample.mp4 +3 -0
  3. app.py +102 -0
  4. haarcascade_frontalface_default.xml +0 -0
  5. requirements.txt +5 -0
  6. test.ipynb +88 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Sample.mp4 filter=lfs diff=lfs merge=lfs -text
Sample.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb48fbbfe295461889585a2c3ffe592ba208d2501018b9517f158108f11acd10
+size 11293922
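
The three added lines above are a Git LFS pointer stub, not the video itself: the new .gitattributes rule routes Sample.mp4 through LFS, so the repository stores only the object's hash and size while the ~11 MB of video bytes live in LFS storage. As a minimal sketch (not part of the commit), the "key value" pointer format can be parsed like this:

    # Illustrative only: parse the Git LFS pointer committed as Sample.mp4.
    # The pointer text below is copied verbatim from the diff above.
    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:eb48fbbfe295461889585a2c3ffe592ba208d2501018b9517f158108f11acd10
    size 11293922"""

    fields = dict(line.split(" ", 1) for line in pointer.splitlines())
    print(fields["oid"])              # sha256 digest of the real video bytes
    print(int(fields["size"]) / 1e6)  # ~11.3 MB held in LFS storage, not in git history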
app.py ADDED
@@ -0,0 +1,102 @@
+import streamlit as st
+from transformers import AutoImageProcessor, AutoModelForImageClassification
+import cv2
+import torch
+import numpy as np
+import tempfile
+
+image_processor = AutoImageProcessor.from_pretrained(
+    'ashish-001/deepfake-detection-using-ViT')
+model = AutoModelForImageClassification.from_pretrained(
+    'ashish-001/deepfake-detection-using-ViT')
+
+
+def classify_frame(frame):
+    inputs = image_processor(images=frame, return_tensors="pt")
+    outputs = model(**inputs)
+    logits = outputs.logits
+    probs = torch.nn.functional.softmax(logits, dim=1)  # softmax, not sigmoid: a proper two-class distribution
+    pred = torch.argmax(logits, dim=1).item()
+    lab = 'Real' if pred == 1 else 'Fake'
+    confidence, _ = torch.max(probs, dim=1)
+    return f"{lab}::{confidence.item():.2f}"
+
+
+st.title("Deepfake detector")
+uploaded_file = st.file_uploader(
+    "Upload an image or video",
+    type=["jpg", "jpeg", "png", "mp4", "avi", "mov", "mkv"]
+)
+placeholder = st.empty()
+if st.button('Detect'):
+    if uploaded_file is not None:
+        clf = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+        mime_type = uploaded_file.type
+        if mime_type.startswith("image"):
+            file_bytes = uploaded_file.read()
+            np_arr = np.frombuffer(file_bytes, np.uint8)
+            image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
+            image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+            faces = clf.detectMultiScale(
+                gray, scaleFactor=1.3, minNeighbors=5)
+            for (x, y, w, h) in faces:
+                face = image_rgb[y:y + h, x:x + w]  # crop before drawing so the box stays out of the model input
+                cv2.rectangle(image_rgb, (x, y), (x+w, y+h), (0, 0, 255), 2)
+                img = face  # image_rgb is already RGB; a second BGR2RGB swap would feed the model BGR data
+                label = classify_frame(img)
+                cv2.putText(
+                    image_rgb, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
+            st.image(image_rgb)
+
+        elif mime_type.startswith('video'):
+            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
+                temp_file.write(uploaded_file.read())
+                temp_video_path = temp_file.name
+            cap = cv2.VideoCapture(temp_video_path)
+            if not cap.isOpened():
+                st.error("Error: Cannot open video file.")
+            else:
+                while True:
+                    ret, frame = cap.read()
+                    if not ret:
+                        break
+                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # frame was just converted to RGB
+                    faces = clf.detectMultiScale(
+                        gray, scaleFactor=1.3, minNeighbors=5)
+                    for (x, y, w, h) in faces:
+                        face = frame[y:y + h, x:x + w]  # crop before drawing the rectangle
+                        cv2.rectangle(
+                            frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
+                        img = face  # frame is already RGB; no further conversion needed
+                        label = classify_frame(img)
+                        cv2.putText(
+                            frame, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
+                    placeholder.image(frame)
+                cap.release()
+
+if st.button('Use Example Video'):
+    clf = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+    cap = cv2.VideoCapture("Sample.mp4")
+    if not cap.isOpened():
+        st.error("Error: Cannot open video file.")
+    else:
+        st.write("Video credits: 'Deep Fakes' Are Becoming More Realistic Thanks To New Technology. Link: https://www.youtube.com/watch?v=CDMVaQOvtxU")
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                break
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)  # frame is RGB at this point
+            faces = clf.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
+            for (x, y, w, h) in faces:
+                face = frame[y:y + h, x:x + w]  # crop before drawing
+                cv2.rectangle(
+                    frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
+                img = face  # frame is already RGB
+                label = classify_frame(img)
+                cv2.putText(
+                    frame, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
+            placeholder.image(frame)
+        cap.release()
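
As a quick sanity check outside Streamlit, a minimal sketch (not part of the commit) runs the same checkpoint on a single image; the file name face.jpg is hypothetical, and the label convention (1 means Real) is carried over from classify_frame in app.py:

    # Standalone check of the ashish-001/deepfake-detection-using-ViT checkpoint.
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, AutoModelForImageClassification

    processor = AutoImageProcessor.from_pretrained(
        'ashish-001/deepfake-detection-using-ViT')
    model = AutoModelForImageClassification.from_pretrained(
        'ashish-001/deepfake-detection-using-ViT')

    image = Image.open("face.jpg").convert("RGB")   # hypothetical input file
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():                           # inference only, no gradients
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=1)
    pred = probs.argmax(dim=1).item()
    print('Real' if pred == 1 else 'Fake', f"{probs.max().item():.2f}")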
haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render (OpenCV's pretrained frontal-face Haar cascade, added whole).
requirements.txt ADDED
@@ -0,0 +1,5 @@
+streamlit==1.29.0
+transformers==4.35.2
+opencv-python==4.7.0.72
+torch==2.4.1
+numpy==1.23.5
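
Assuming these pins, the app should be reproducible with "pip install -r requirements.txt" followed by "streamlit run app.py", with haarcascade_frontalface_default.xml and Sample.mp4 present in the working directory.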
test.ipynb ADDED
@@ -0,0 +1,88 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "e:\\miniconda\\lib\\site-packages\\transformers\\utils\\generic.py:441: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. Please use `torch.utils._pytree.register_pytree_node` instead.\n",
+      " _torch_pytree._register_pytree_node(\n"
+     ]
+    }
+   ],
+   "source": [
+    "from transformers import AutoImageProcessor, AutoModelForImageClassification"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "e:\\miniconda\\lib\\site-packages\\transformers\\utils\\generic.py:309: FutureWarning: `torch.utils._pytree._register_pytree_node` is deprecated. Please use `torch.utils._pytree.register_pytree_node` instead.\n",
+      " _torch_pytree._register_pytree_node(\n"
+     ]
+    }
+   ],
+   "source": [
+    "image_processor = AutoImageProcessor.from_pretrained('model')\n",
+    "model = AutoModelForImageClassification.from_pretrained('model')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0.9\n"
+     ]
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "a=[0.1,0.9]\n",
+    "print(np.max(a))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "base",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}