Update app.py
app.py
CHANGED
@@ -1,150 +1,94 @@
 import streamlit as st
 import requests
 from transformers import pipeline
+from deepface import DeepFace
 from PIL import Image
 import torch
 import torchvision.transforms as transforms
 import cv2
 import numpy as np
-from deepface import DeepFace
 from bs4 import BeautifulSoup
+import re

-# Load Fake News Detection Model
+# Load Fake News Detection Model
 fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")

-# Function to classify text as Fake or Real
 def classify_text(news_text):
     result = fake_news_pipeline(news_text)[0]
     label = result['label'].lower()
     score = result['score'] * 100  # Convert to percentage
     return ("Fake" if label == "fake" else "Real"), round(score, 2)

-# Function to analyze image authenticity
 def analyze_image(image):
     try:
-
-
-
-
-
+        result = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
+        return "Fake" if result[0]['dominant_emotion'] in ['disgust', 'fear', 'sad'] else "Real"
+    except:
+        return "Error in analyzing image."
+
+def analyze_video(video_url):
+    return "Video analysis feature coming soon!"

-# Function to verify news from open sources
 def verify_news(news_text):
     search_url = f"https://www.google.com/search?q={'+'.join(news_text.split())}"
-
-    soup = BeautifulSoup(response.text, "html.parser")
-
-    results = []
-    for link in soup.find_all("a", href=True):
-        if "http" in link["href"] and "google" not in link["href"]:
-            results.append((link.text.strip(), link["href"]))
-            if len(results) >= 3:  # Limit to 3 sources
-                break
-    return results
+    return search_url

-
+def scrape_verification_links(news_text):
+    sources = [
+        "https://www.bbc.com/news",
+        "https://www.cnn.com",
+        "https://www.reuters.com",
+        "https://factcheck.org",
+        "https://www.snopes.com",
+        "https://www.politifact.com"
+    ]
+    verification_links = {}
+    for source in sources:
+        try:
+            response = requests.get(source)
+            soup = BeautifulSoup(response.text, 'html.parser')
+            for link in soup.find_all('a', href=True):
+                if re.search(news_text[:5], link.text, re.IGNORECASE):
+                    verification_links[link.text] = source + link['href']
+        except:
+            continue
+    return verification_links
+
+# Streamlit UI
 st.set_page_config(page_title="Fake News Detector", layout="wide")
 st.title("📰 Fake News Detector")

-# Sidebar Input Selection
+# Sidebar for Input Selection
 st.sidebar.title("Select Input Type")
 option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])

-# Session Variables
-if "result_text" not in st.session_state:
-    st.session_state["result_text"] = None
-if "accuracy_text" not in st.session_state:
-    st.session_state["accuracy_text"] = None
-if "result_image" not in st.session_state:
-    st.session_state["result_image"] = None
-if "accuracy_image" not in st.session_state:
-    st.session_state["accuracy_image"] = None
-if "video_result" not in st.session_state:
-    st.session_state["video_result"] = None
-
 # Input Section
 if option == "Text":
     news_text = st.text_area("Enter the news content to check:", height=200)
     analyze_text_clicked = st.button("Analyze News")
-
-
-
-
-    else:
-
-
-        st.
-
-        st.session_state["verification_text"] = verification_links
+    if analyze_text_clicked and news_text.strip():
+        result, accuracy = classify_text(news_text)
+        verification_links = scrape_verification_links(news_text)
+        st.subheader("📊 Analysis Results")
+        st.error(f"❌ This news is likely **Fake**! (Accuracy: {accuracy}%)") if result == "Fake" else st.success(f"✅ This news is likely **Real**! (Accuracy: {accuracy}%)")
+        st.subheader("🔗 Verification & Trusted Sources")
+        for title, link in verification_links.items():
+            st.markdown(f"[🔗 {title}]({link})")
+        st.markdown(f"[🔍 Verify on Google]({verify_news(news_text)})")

 elif option == "Image":
     uploaded_image = st.file_uploader("Upload a news image", type=["jpg", "png", "jpeg"])
     analyze_image_clicked = st.button("Analyze Image")
-
     if uploaded_image and analyze_image_clicked:
         image = Image.open(uploaded_image)
-
-
-        st.
+        st.image(image, caption="Uploaded Image", use_column_width=True)
+        result = analyze_image(np.array(image))
+        st.error("❌ This image is likely **Fake**!") if result == "Fake" else st.success("✅ This image is likely **Real**!")

 elif option == "Video Link":
     video_url = st.text_input("Enter the video link:")
     analyze_video_clicked = st.button("Analyze Video")
-
-
-
-
-    else:
-        st.session_state["video_result"] = "Real"  # Placeholder (Video verification requires advanced models)
-
-# Results Section
-st.subheader("📊 Analysis Results")
-
-# Text Results
-if st.session_state.get("result_text"):
-    result = st.session_state["result_text"]
-    accuracy = st.session_state["accuracy_text"]
-    st.subheader("📝 Text Analysis")
-
-    if result == "Fake":
-        st.error(f"❌ This news is likely **Fake**! (Accuracy: {accuracy}%)", icon="⚠️")
-    else:
-        st.success(f"✅ This news is likely **Real**! (Accuracy: {accuracy}%)", icon="✅")
-
-    st.subheader("🔗 Verification & Trusted Sources")
-    sources = [
-        "https://www.bbc.com/news",
-        "https://www.cnn.com",
-        "https://www.reuters.com",
-        "https://factcheck.org",
-        "https://www.snopes.com",
-        "https://www.politifact.com"
-    ]
-    for link in sources:
-        st.markdown(f"[🔗 {link}]({link})")
-
-    if "verification_text" in st.session_state:
-        for name, link in st.session_state["verification_text"]:
-            st.markdown(f"[🔗 {name}]({link})")
-
-# Image Results
-if st.session_state.get("result_image"):
-    result = st.session_state["result_image"]
-    accuracy = st.session_state["accuracy_image"]
-    st.subheader("🖼️ Image Analysis")
-
-    if result == "Fake":
-        st.error(f"❌ This image is likely **Fake**! (Accuracy: {accuracy}%)", icon="⚠️")
-    else:
-        st.success(f"✅ This image is likely **Real**! (Accuracy: {accuracy}%)", icon="✅")
-
-# Video Results
-if st.session_state.get("video_result"):
-    result = st.session_state["video_result"]
-    st.subheader("📹 Video Analysis")
-
-    if result == "Fake":
-        st.error("❌ This video is likely **Fake**!", icon="⚠️")
-    else:
-        st.success("✅ This video is likely **Real**!", icon="✅")
-
+    if analyze_video_clicked and video_url.strip():
+        st.video(video_url)
+        result = analyze_video(video_url)
+        st.info(result)
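
For a quick sanity check of the classifier this commit keeps unchanged, the snippet below is a minimal sketch (not part of the commit) that runs the same transformers pipeline outside Streamlit; the sample headline is made up, and the label-to-verdict mapping simply mirrors classify_text(), since the exact label strings come from the model's own config.

# Standalone sanity check of the text classifier used above (illustrative only).
from transformers import pipeline

clf = pipeline("text-classification",
               model="mrm8488/bert-tiny-finetuned-fake-news-detection")

sample = "Scientists confirm that drinking coffee makes you invisible."  # made-up headline
pred = clf(sample)[0]            # a dict like {'label': ..., 'score': ...}
print(pred)                      # inspect the raw label/score before trusting the mapping

label = pred["label"].lower()    # same mapping as classify_text() in app.py
print("Fake" if label == "fake" else "Real", round(pred["score"] * 100, 2))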
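The new analyze_image() hinges on DeepFace.analyze() returning a per-face emotion breakdown even when no face is detected (enforce_detection=False), and on the result being a list of dicts, which is what the diff indexes. A minimal check of that assumption is sketched below; the blank image is only a placeholder, so substitute a real photo in practice.

# Minimal check of the DeepFace call analyze_image() relies on (illustrative only).
import numpy as np
from deepface import DeepFace

frame = np.zeros((224, 224, 3), dtype=np.uint8)   # placeholder image; use a real photo
analysis = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
print(analysis[0]["dominant_emotion"])            # the key the app's Fake/Real heuristic reads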
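The scraping helper above matches the first five characters of the news text as a raw regular expression and fetches each source without a timeout. A more defensive variant of the same idea is sketched below; it is an illustration under stated assumptions, not part of this commit, and find_matching_links() and its limit parameter are hypothetical names: escape the query fragment, bound the request, and resolve relative hrefs.

# Illustrative hardening of the link-matching idea in scrape_verification_links()
# (not part of this commit).
import re
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

def find_matching_links(news_text, source, limit=3):
    pattern = re.escape(news_text[:30])            # treat the query fragment as literal text
    links = {}
    try:
        response = requests.get(source, timeout=10)
        soup = BeautifulSoup(response.text, "html.parser")
        for a in soup.find_all("a", href=True):
            if re.search(pattern, a.get_text(), re.IGNORECASE):
                links[a.get_text(strip=True)] = urljoin(source, a["href"])  # handles absolute and relative hrefs
                if len(links) >= limit:
                    break
    except requests.RequestException:
        pass
    return links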