ashish-001 committed
Commit 7c09e5b · verified · 1 Parent(s): 6d37900

Update app.py

Files changed (1)
  1. app.py +104 -102
app.py CHANGED
@@ -1,102 +1,104 @@
- import streamlit as st
- from transformers import AutoImageProcessor, AutoModelForImageClassification
- import cv2
- import torch
- import numpy as np
- import tempfile
-
- image_processor = AutoImageProcessor.from_pretrained(
-     'ashish-001/deepfake-detection-using-ViT')
- model = AutoModelForImageClassification.from_pretrained(
-     'ashish-001/deepfake-detection-using-ViT')
-
-
- def classify_frame(frame):
-     inputs = image_processor(images=frame, return_tensors="pt")
-     outputs = model(**inputs)
-     logits = outputs.logits
-     probs = torch.nn.functional.sigmoid(logits)
-     pred = torch.argmax(logits, dim=1).item()
-     lab = 'Real' if pred == 1 else 'Fake'
-     confidence, _ = torch.max(probs, dim=1)
-     return f"{lab}::{format(confidence.item(), '.2f')}"
-
-
- st.title("Deepfake detector")
- uploaded_file = st.file_uploader(
-     "Upload an image or video",
-     type=["jpg", "jpeg", "png", "mp4", "avi", "mov", "mkv"]
- )
- placeholder = st.empty()
- if st.button('Detect'):
-     if uploaded_file is not None:
-         clf = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
-         mime_type = uploaded_file.type
-         if mime_type.startswith("image"):
-             file_bytes = uploaded_file.read()
-             np_arr = np.frombuffer(file_bytes, np.uint8)
-             image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
-             image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-             faces = clf.detectMultiScale(
-                 gray, scaleFactor=1.3, minNeighbors=5)
-             for (x, y, w, h) in faces:
-                 cv2.rectangle(image_rgb, (x, y), (x+w, y+h), (0, 0, 255), 2)
-                 face = image_rgb[y:y + h, x:x + w]
-                 img = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
-                 label = classify_frame(img)
-                 new_frame = cv2.putText(
-                     image_rgb, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
-             st.image(new_frame)
-
-         elif mime_type.startswith('video'):
-             with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
-                 temp_file.write(uploaded_file.read())
-                 temp_video_path = temp_file.name
-             cap = cv2.VideoCapture(temp_video_path)
-             if not cap.isOpened():
-                 st.error("Error: Cannot open video file.")
-             else:
-                 while True:
-                     ret, frame = cap.read()
-                     if not ret:
-                         break
-                     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-                     faces = clf.detectMultiScale(
-                         gray, scaleFactor=1.3, minNeighbors=5)
-                     for (x, y, w, h) in faces:
-                         cv2.rectangle(
-                             frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
-                         face = frame[y:y + h, x:x + w]
-                         img = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
-                         label = classify_frame(img)
-                         frame = cv2.putText(
-                             frame, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
-                     placeholder.image(frame)
-                 cap.release()
-
- if st.button('Use Example Video'):
-     clf = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
-     cap = cv2.VideoCapture("Sample.mp4")
-     if not cap.isOpened():
-         st.error("Error: Cannot open video file.")
-     else:
-         st.write(f"Video credits: 'Deep Fakes' Are Becoming More Realistic Thanks To New Technology. Link:https://www.youtube.com/watch?v=CDMVaQOvtxU")
-         while True:
-             ret, frame = cap.read()
-             if not ret:
-                 break
-             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-             faces = clf.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
-             for (x, y, w, h) in faces:
-                 cv2.rectangle(
-                     frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
-                 face = frame[y:y + h, x:x + w]
-                 img = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
-                 label = classify_frame(img)
-                 frame = cv2.putText(
-                     frame, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
-             placeholder.image(frame)
-         cap.release()
 
 
 
+ import streamlit as st
+ from transformers import AutoImageProcessor, AutoModelForImageClassification
+ import cv2
+ import torch
+ import numpy as np
+ import tempfile
+
+ image_processor = AutoImageProcessor.from_pretrained(
+     'ashish-001/deepfake-detection-using-ViT')
+ model = AutoModelForImageClassification.from_pretrained(
+     'ashish-001/deepfake-detection-using-ViT')
+
+
+ def classify_frame(frame):
+     inputs = image_processor(images=frame, return_tensors="pt")
+     outputs = model(**inputs)
+     logits = outputs.logits
+     probs = torch.nn.functional.sigmoid(logits)
+     pred = torch.argmax(logits, dim=1).item()
+     lab = 'Real' if pred == 1 else 'Fake'
+     confidence, _ = torch.max(probs, dim=1)
+     return f"{lab}::{format(confidence.item(), '.2f')}"
+
+
+ st.title("Deepfake detector")
+ uploaded_file = st.file_uploader(
+     "Upload an image or video",
+     type=["jpg", "jpeg", "png", "mp4", "avi", "mov", "mkv"]
+ )
+ placeholder = st.empty()
+ if st.button('Detect'):
+     if uploaded_file is not None:
+         clf = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+         mime_type = uploaded_file.type
+         if mime_type.startswith("image"):
+             file_bytes = uploaded_file.read()
+             np_arr = np.frombuffer(file_bytes, np.uint8)
+             image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
+             image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+             faces = clf.detectMultiScale(
+                 gray, scaleFactor=1.3, minNeighbors=5)
+             for (x, y, w, h) in faces:
+                 cv2.rectangle(image_rgb, (x, y), (x+w, y+h), (0, 0, 255), 2)
+                 face = image_rgb[y:y + h, x:x + w]
+                 img = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+                 label = classify_frame(img)
+                 new_frame = cv2.putText(
+                     image_rgb, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
+             st.image(new_frame)
+
+         elif mime_type.startswith('video'):
+             with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
+                 temp_file.write(uploaded_file.read())
+                 temp_video_path = temp_file.name
+             cap = cv2.VideoCapture(temp_video_path)
+             if not cap.isOpened():
+                 st.error("Error: Cannot open video file.")
+             else:
+                 while True:
+                     ret, frame = cap.read()
+                     if not ret:
+                         break
+                     frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                     faces = clf.detectMultiScale(
+                         gray, scaleFactor=1.3, minNeighbors=5)
+                     for (x, y, w, h) in faces:
+                         cv2.rectangle(
+                             frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
+                         face = frame[y:y + h, x:x + w]
+                         img = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+                         label = classify_frame(img)
+                         frame = cv2.putText(
+                             frame, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
+                     placeholder.image(frame)
+                 cap.release()
+
+     else:
+         st.write("Please upload an image or video")
+ if st.button('Use Example Video'):
+     clf = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+     cap = cv2.VideoCapture("Sample.mp4")
+     if not cap.isOpened():
+         st.error("Error: Cannot open video file.")
+     else:
+         st.write(f"Video credits: 'Deep Fakes' Are Becoming More Realistic Thanks To New Technology. Link:https://www.youtube.com/watch?v=CDMVaQOvtxU")
+         while True:
+             ret, frame = cap.read()
+             if not ret:
+                 break
+             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+             faces = clf.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
+             for (x, y, w, h) in faces:
+                 cv2.rectangle(
+                     frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
+                 face = frame[y:y + h, x:x + w]
+                 img = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+                 label = classify_frame(img)
+                 frame = cv2.putText(
+                     frame, label, (x, y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
+             placeholder.image(frame)
+         cap.release()
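
The only functional change in this commit is the added else branch under the Detect button: when no file has been uploaded, the app now writes "Please upload an image or video" instead of doing nothing. For anyone who wants to exercise the underlying classifier outside Streamlit, a minimal standalone sketch follows. It reuses the model id and the label mapping that appear in the diff; the input path "face.jpg" is hypothetical, and the softmax-based confidence is a choice made for this sketch, not the app's own sigmoid-based score.

# Standalone sketch (assumptions: "face.jpg" is a hypothetical cropped face
# image; softmax is used here for a normalized confidence, whereas app.py
# applies sigmoid to the logits).
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained('ashish-001/deepfake-detection-using-ViT')
model = AutoModelForImageClassification.from_pretrained('ashish-001/deepfake-detection-using-ViT')

image = Image.open("face.jpg").convert("RGB")   # hypothetical input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
probs = torch.softmax(logits, dim=1)
pred = probs.argmax(dim=1).item()
label = 'Real' if pred == 1 else 'Fake'         # same label mapping as classify_frame
print(f"{label}::{probs[0, pred].item():.2f}")

Running this prints a label and score in the same "Label::confidence" format that classify_frame returns in the app.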