Update app.py
app.py CHANGED
@@ -6,6 +6,9 @@ import torchaudio
 import torchvision
 import tensorflow as tf
 from transformers import pipeline
+from PIL import Image
+import requests
+from io import BytesIO
 
 # Load a fake news detection model from Hugging Face
 fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
@@ -14,70 +17,77 @@ fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-fi
 st.set_page_config(page_title="Fake News Detector", layout="wide")
 st.title("📰 Fake News Detector")
 
-#
-
-    return [
-        "https://www.bbc.com/news",
-        "https://www.cnn.com",
-        "https://www.reuters.com",
-        "https://huggingface.co/datasets/misinformation",
-        "https://www.wildfire.ai/deepfake-news-dataset",
-        "https://www.snopes.com",
-        "https://www.factcheck.org"
-    ]
-
-# Input Section
-st.sidebar.title("Select Input Type")
-option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])
-
-if option == "Text":
-    news_text = st.text_area("Enter the news content to check:", height=200)
-    analyze_text = st.button("Analyze Text")
+# Tabs for Input and Results
+tab1, tab2 = st.tabs(["Input", "Results"])
 
-
-
-
-
-
-
-
-
-
-
-        st.session_state["analyze"] = False
+# Function to fetch real news links based on keywords
+def fetch_real_news_links(query):
+    search_urls = [
+        f"https://www.bbc.co.uk/search?q={query}",
+        f"https://www.cnn.com/search?q={query}",
+        f"https://www.reuters.com/search/news?blob={query}",
+        f"https://www.snopes.com/?s={query}",
+        f"https://www.factcheck.org/search/?q={query}"
+    ]
+    return search_urls
 
-
-
-
-
-
-
-
-
+with tab1:
+    st.sidebar.title("Select Input Type")
+    option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])
+
+    if option == "Text":
+        news_text = st.text_area("Enter the news content to check:", height=200)
+        if st.button("Analyze News"):
+            if not news_text.strip():
+                st.warning("Please enter some text.")
+            else:
+                st.session_state["news_text"] = news_text
+                st.session_state["analyze"] = True
+                st.rerun()
+
+    elif option == "Image":
+        uploaded_file = st.file_uploader("Upload an image of a news article", type=["jpg", "png", "jpeg"])
+        if uploaded_file is not None:
+            image = Image.open(uploaded_file)
+            st.image(image, caption="Uploaded Image", use_column_width=True)
+            st.session_state["image_uploaded"] = True
+            st.warning("⚠️ Image analysis is coming soon!")
+
+    elif option == "Video Link":
+        video_url = st.text_input("Enter a video news link to check")
+        if st.button("Analyze Video"):
+            if not video_url.strip():
+                st.warning("Please enter a valid URL.")
+            else:
+                st.session_state["video_url"] = video_url
+                st.warning("⚠️ Video analysis is coming soon!")
 
-
-
+with tab2:
+    if st.session_state.get("analyze", False):
         news_text = st.session_state.get("news_text", "")
-
-
-        if news_text:
+        with st.spinner("Analyzing..."):
+            # Check using Hugging Face model
             hf_result = fake_news_pipeline(news_text)[0]['label'].lower()
+
+            # Display result
+            if hf_result == "fake":
+                st.error("❌ This news is likely **Fake**!", icon="⚠️")
+                conclusion = "The analysis suggests that this news might be fabricated or misleading. Please verify from credible sources."
+                real_news_links = fetch_real_news_links(news_text[:50])
+            elif hf_result == "real":
+                st.success("✅ This news is likely **Real**!", icon="✅")
+                conclusion = "The analysis indicates that this news appears to be credible and factual."
+                real_news_links = fetch_real_news_links(news_text[:50])
+            else:
+                st.info("🤔 The result is uncertain. Please verify from trusted sources.")
+                conclusion = "There is uncertainty in the classification. Further verification is recommended."
+                real_news_links = []
+
+            # Conclusion Section
+            st.subheader("📢 Conclusion")
+            st.write(conclusion)
+
+            # Display real news sources
+            st.subheader("🔗 Related News Articles")
+            for link in real_news_links:
+                st.markdown(f"[🔗 {link}]({link})")
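
Note on the new result logic: the branches in tab2 compare the classifier output to the literal strings "fake" and "real". Before relying on those branches, it may be worth checking which label strings this model actually emits. The following is a minimal standalone sketch that reuses only the pipeline call already present in app.py; the sample sentence is made up purely for illustration.

# Minimal sketch: print the label string the model returns,
# since the tab2 code branches on hf_result == "fake" / "real".
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="mrm8488/bert-tiny-finetuned-fake-news-detection",
)

sample = "Scientists announce the moon is made entirely of cheese."  # illustrative input only
result = clf(sample)[0]  # a dict of the form {'label': ..., 'score': ...}
print(result["label"].lower(), round(result["score"], 3))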