Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,38 +1,25 @@
|
|
1 |
import streamlit as st
|
2 |
-
import os
|
3 |
-
import cv2
|
4 |
-
import torch
|
5 |
-
import torchaudio
|
6 |
-
import torchvision
|
7 |
-
import tensorflow as tf
|
8 |
from transformers import pipeline
|
9 |
-
from groq import Groq
|
10 |
-
from openai import OpenAI
|
11 |
|
12 |
-
# Load
|
13 |
-
GROQ_API_KEY = os.getenv ("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")
|
14 |
-
|
15 |
-
# Initialize groq client
|
16 |
-
client = Groq(api_key=GROQ_API_KEY)
|
17 |
-
|
18 |
-
# Load a fake news detection model from Hugging Face
|
19 |
fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
|
20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
# Streamlit UI
|
22 |
st.set_page_config(page_title="Fake News Detector", layout="wide")
|
23 |
st.title("π° Fake News Detector")
|
24 |
|
25 |
# Tabs for Input and Results
|
26 |
-
tab1, tab2, tab3 = st.tabs(["
|
27 |
-
|
28 |
-
# Function to fetch real news links (mocked for now)
|
29 |
-
def fetch_real_news_links():
|
30 |
-
return ["https://www.bbc.com/news", "https://www.cnn.com", "https://www.reuters.com"]
|
31 |
-
|
32 |
|
33 |
with tab1:
|
34 |
st.sidebar.title("Select Input Type")
|
35 |
-
option = st.sidebar.radio("Choose an option", ["Text"
|
36 |
|
37 |
if option == "Text":
|
38 |
news_text = st.text_area("Enter the news content to check:", height=200)
|
@@ -44,51 +31,56 @@ with tab1:
|
|
44 |
st.session_state["analyze"] = True
|
45 |
st.experimental_rerun()
|
46 |
|
47 |
-
elif option == "Image":
|
48 |
-
uploaded_file = st.file_uploader("Upload an image of a news article", type=["jpg", "png", "jpeg"])
|
49 |
-
if uploaded_file is not None:
|
50 |
-
st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
|
51 |
-
st.info("π Image analysis feature coming soon!")
|
52 |
-
|
53 |
-
elif option == "Video Link":
|
54 |
-
video_url = st.text_input("Enter a video news link to check")
|
55 |
-
if st.button("Analyze Video"):
|
56 |
-
if not video_url.strip():
|
57 |
-
st.warning("Please enter a valid URL.")
|
58 |
-
else:
|
59 |
-
st.info("π Video analysis feature coming soon!")
|
60 |
-
|
61 |
with tab2:
|
62 |
if st.session_state.get("analyze", False):
|
63 |
news_text = st.session_state.get("news_text", "")
|
64 |
with st.spinner("Analyzing..."):
|
65 |
-
# Check using Groq API
|
66 |
-
chat_completion = client.chat.completions.create(
|
67 |
-
messages=[{"role": "user", "content": f"Classify this news as Real or Fake: {news_text}"}],
|
68 |
-
model="llama-3.3-70b-versatile",
|
69 |
-
stream=False,
|
70 |
-
)
|
71 |
-
groq_result = chat_completion.choices[0].message.content.strip().lower()
|
72 |
-
|
73 |
# Check using Hugging Face model
|
74 |
-
|
75 |
-
|
76 |
-
#
|
77 |
-
|
|
|
|
|
|
|
|
|
|
|
78 |
st.error("β This news is likely **Fake**!", icon="β οΈ")
|
79 |
-
conclusion = "The analysis suggests that this news might be fabricated or misleading.
|
80 |
-
elif "real" in
|
81 |
st.success("β
This news is likely **Real**!", icon="β
")
|
82 |
conclusion = "The analysis indicates that this news appears to be credible and factual."
|
83 |
else:
|
84 |
st.info("π€ The result is uncertain. Please verify from trusted sources.")
|
85 |
-
conclusion = "
|
86 |
-
|
87 |
# Conclusion Section
|
88 |
st.subheader("π Conclusion")
|
89 |
st.write(conclusion)
|
90 |
|
91 |
with tab3:
|
92 |
-
st.subheader("π
|
93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
st.markdown(f"[π {link}]({link})")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import streamlit as st
from transformers import pipeline

# Load the primary fake-news detection model from the Hugging Face hub.
fake_news_pipeline = pipeline(
    "text-classification",
    model="mrm8488/bert-tiny-finetuned-fake-news-detection",
)

# Optional secondary model, loaded best-effort.
# NOTE(review): this is a *sentiment* model whose labels are "1 star".."5 stars";
# they can never contain "fake"/"real", so the downstream cross-check in the
# results tab is effectively a no-op. Kept as-is for compatibility — consider
# replacing it with a second fake-news classifier.
try:
    alternative_model = pipeline(
        "text-classification",
        model="nlptown/bert-base-multilingual-uncased-sentiment",
    )
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
    # propagate; any model-loading failure falls back to the primary model only.
    alternative_model = None  # Fallback in case the model cannot be loaded

# Streamlit UI
st.set_page_config(page_title="Fake News Detector", layout="wide")
# Emoji reconstructed from mojibake in the scraped source — original was
# almost certainly a newspaper emoji; confirm against the live app.
st.title("📰 Fake News Detector")

# Tabs for input, analysis results, and a list of reliable news sources.
tab1, tab2, tab3 = st.tabs(["Input", "Results", "Reliable News Sources"])
|
|
|
|
|
|
|
|
|
|
|
19 |
|
20 |
with tab1:
|
21 |
st.sidebar.title("Select Input Type")
|
22 |
+
option = st.sidebar.radio("Choose an option", ["Text"])
|
23 |
|
24 |
if option == "Text":
|
25 |
news_text = st.text_area("Enter the news content to check:", height=200)
|
|
|
31 |
st.session_state["analyze"] = True
|
32 |
st.experimental_rerun()
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
with tab2:
    # Only run the analysis after tab1 has set the "analyze" flag.
    if st.session_state.get("analyze", False):
        news_text = st.session_state.get("news_text", "")
        # Guard: the classifiers give meaningless output (or raise) on empty
        # input, so skip straight to the "uncertain" path when there is no text.
        if not news_text.strip():
            st.info("🤔 No news text was provided. Please enter content in the Input tab.")
            conclusion = "Further verification is recommended."
        else:
            with st.spinner("Analyzing..."):
                # Primary check: Hugging Face fake-news classifier.
                result = fake_news_pipeline(news_text)[0]["label"].lower()

                # Secondary check, only when the optional model loaded.
                # NOTE(review): the alternative model emits sentiment labels
                # ("1 star".."5 stars"), which never contain "fake"/"real" —
                # so in practice only `result` drives the decision below.
                alternative_result = None
                if alternative_model:
                    alternative_result = alternative_model(news_text)[0]["label"].lower()

                # Decision logic: either model flagging "fake" wins; otherwise
                # either model saying "real" counts; anything else is uncertain.
                # Emojis reconstructed from mojibake in the scraped source —
                # confirm against the live app.
                if "fake" in result or (alternative_result and "fake" in alternative_result):
                    st.error("❌ This news is likely **Fake**!", icon="⚠️")
                    conclusion = "The analysis suggests that this news might be fabricated or misleading."
                elif "real" in result or (alternative_result and "real" in alternative_result):
                    st.success("✅ This news is likely **Real**!", icon="✅")
                    conclusion = "The analysis indicates that this news appears to be credible and factual."
                else:
                    st.info("🤔 The result is uncertain. Please verify from trusted sources.")
                    conclusion = "Further verification is recommended."

        # Conclusion Section
        st.subheader("📌 Conclusion")
        st.write(conclusion)
|
60 |
|
61 |
with tab3:
    # Static directory of trusted outlets, rendered as markdown links.
    # Emojis reconstructed from mojibake in the scraped source — confirm
    # against the live app.
    st.subheader("🔗 Reliable News Sources")
    reliable_sources = [
        "https://www.bbc.com/news",
        "https://www.cnn.com",
        "https://www.reuters.com",
        "https://www.nytimes.com",
        "https://www.aljazeera.com",
        "https://www.theguardian.com/international",
        "https://www.washingtonpost.com",
        "https://www.npr.org",
        "https://www.apnews.com",
    ]
    for link in reliable_sources:
        st.markdown(f"[🌐 {link}]({link})")

    # Open datasets useful for training/evaluating fake-news detectors.
    # NOTE(review): several of these Hugging Face dataset URLs look
    # hand-typed and may 404 — verify each slug before shipping.
    st.subheader("📊 Open Datasets for Fake News Detection")
    datasets = [
        "https://huggingface.co/datasets/misinformation/fake_news",
        "https://huggingface.co/datasets/liar_dataset",
        "https://huggingface.co/datasets/news-category-dataset",
        "https://huggingface.co/datasets/ccdv/realnews",
        "https://huggingface.co/datasets/Shoonya-Data/fact-verification",
    ]
    for dataset in datasets:
        st.markdown(f"[📂 {dataset}]({dataset})")