Update app.py
app.py CHANGED
@@ -1,25 +1,42 @@
 import streamlit as st
+import os
+import cv2
+import torch
+import torchaudio
+import torchvision
+import tensorflow as tf
 from transformers import pipeline
+from groq import Groq
+from openai import OpenAI

-#
-
+# Set up the Groq client
+client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

-#
-
-    alternative_model = pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")
-except:
-    alternative_model = None  # Fallback in case of failure
+# Load a fake news detection model from Hugging Face
+fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")

 # Streamlit UI
 st.set_page_config(page_title="Fake News Detector", layout="wide")
 st.title("📰 Fake News Detector")

 # Tabs for Input and Results
-tab1, tab2
+tab1, tab2 = st.tabs(["Input", "Results"])
+
+# Function to fetch real news links from various open sources
+def fetch_real_news_links():
+    return [
+        "https://www.bbc.com/news",
+        "https://www.cnn.com",
+        "https://www.reuters.com",
+        "https://huggingface.co/datasets/misinformation",
+        "https://www.wildfire.ai/deepfake-news-dataset",
+        "https://www.snopes.com",
+        "https://www.factcheck.org"
+    ]

 with tab1:
     st.sidebar.title("Select Input Type")
-    option = st.sidebar.radio("Choose an option", ["Text"])
+    option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])

     if option == "Text":
         news_text = st.text_area("Enter the news content to check:", height=200)
@@ -29,59 +46,55 @@ with tab1:
         else:
             st.session_state["news_text"] = news_text
             st.session_state["analyze"] = True
-            st.
-
+            st.experimental_rerun()
+
+    elif option == "Image":
+        uploaded_file = st.file_uploader("Upload an image of a news article", type=["jpg", "png", "jpeg"])
+        if uploaded_file is not None:
+            st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
+            st.info("🔍 Image analysis coming soon!")
+
+    elif option == "Video Link":
+        video_url = st.text_input("Enter a video news link to check")
+        if st.button("Analyze Video"):
+            if not video_url.strip():
+                st.warning("Please enter a valid URL.")
+            else:
+                st.info("🔍 Video analysis coming soon!")

 with tab2:
     if st.session_state.get("analyze", False):
         news_text = st.session_state.get("news_text", "")
         with st.spinner("Analyzing..."):
+            # Check using Groq API
+            chat_completion = client.chat.completions.create(
+                messages=[{"role": "user", "content": f"Classify this news as Real or Fake: {news_text}"}],
+                model="llama-3.3-70b-versatile",
+                stream=False,
+            )
+            groq_result = chat_completion.choices[0].message.content.strip().lower()
+
             # Check using Hugging Face model
-
-
-            #
-
-            if alternative_model:
-                alternative_result = alternative_model(news_text)[0]['label'].lower()
-
-            # Decision logic
-            if "fake" in result or (alternative_result and "fake" in alternative_result):
+            hf_result = fake_news_pipeline(news_text)[0]['label'].lower()
+
+            # Display result
+            if "fake" in groq_result or hf_result == "fake":
                 st.error("❌ This news is likely **Fake**!", icon="⚠️")
-
-
+                st.markdown('<style>div.stAlert {background-color: #ffdddd;}</style>', unsafe_allow_html=True)
+                conclusion = "The analysis suggests that this news might be fabricated or misleading. Please verify from credible sources."
+            elif "real" in groq_result or hf_result == "real":
                 st.success("✅ This news is likely **Real**!", icon="✅")
+                st.markdown('<style>div.stAlert {background-color: #ddffdd;}</style>', unsafe_allow_html=True)
                 conclusion = "The analysis indicates that this news appears to be credible and factual."
             else:
                 st.info("🤔 The result is uncertain. Please verify from trusted sources.")
-                conclusion = "Further verification is recommended."
-
+                conclusion = "There is uncertainty in the classification. Further verification is recommended."
+
             # Conclusion Section
             st.subheader("📌 Conclusion")
             st.write(conclusion)
-
-
-
-
-
-                "https://www.cnn.com",
-                "https://www.reuters.com",
-                "https://www.nytimes.com",
-                "https://www.aljazeera.com",
-                "https://www.theguardian.com/international",
-                "https://www.washingtonpost.com",
-                "https://www.npr.org",
-                "https://www.apnews.com"
-            ]
-            for link in reliable_sources:
-                st.markdown(f"[🔗 {link}]({link})")
-
-            st.subheader("📂 Open Datasets for Fake News Detection")
-            datasets = [
-                "https://huggingface.co/datasets/misinformation/fake_news",
-                "https://huggingface.co/datasets/liar_dataset",
-                "https://huggingface.co/datasets/news-category-dataset",
-                "https://huggingface.co/datasets/ccdv/realnews",
-                "https://huggingface.co/datasets/Shoonya-Data/fact-verification"
-            ]
-            for dataset in datasets:
-                st.markdown(f"[📂 {dataset}]({dataset})")
+
+            # Display real news sources
+            st.subheader("📰 Reliable News Sources")
+            for link in fetch_real_news_links():
+                st.markdown(f"[🔗 {link}]({link})")
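A note on the Streamlit calls introduced above: recent Streamlit releases have removed st.experimental_rerun() in favor of st.rerun(), so the rerun call added in this commit may raise an error depending on the version installed in the Space. A minimal compatibility sketch, with safe_rerun being an illustrative helper name rather than part of the app:

import streamlit as st

# Illustrative shim: prefer st.rerun() where it exists (newer Streamlit),
# fall back to the older st.experimental_rerun() otherwise.
def safe_rerun():
    if hasattr(st, "rerun"):
        st.rerun()
    else:
        st.experimental_rerun()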
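For anyone who wants to sanity-check the two classifiers wired in above, a minimal sketch that exercises them outside Streamlit. It assumes GROQ_API_KEY is exported in the environment and that the groq and transformers packages are installed; the sample headline is illustrative, and the exact label strings emitted by the Hugging Face checkpoint are model-specific, which is why the app compares them against "fake"/"real".

import os
from transformers import pipeline
from groq import Groq

# Same checkpoint as in app.py; labels come from the model's own config.
clf = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")

# Same Groq call shape as in app.py.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

sample = "Example headline to classify."

print(clf(sample)[0])  # e.g. {'label': ..., 'score': ...}

resp = client.chat.completions.create(
    messages=[{"role": "user", "content": f"Classify this news as Real or Fake: {sample}"}],
    model="llama-3.3-70b-versatile",
    stream=False,
)
print(resp.choices[0].message.content)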