Nimzi committed · Commit 107dbe8 · verified · 1 Parent(s): fc0aaaa

Update app.py

Files changed (1):
  1. app.py +46 -54
app.py CHANGED
@@ -1,38 +1,25 @@
  import streamlit as st
- import os
- import cv2
- import torch
- import torchaudio
- import torchvision
- import tensorflow as tf
  from transformers import pipeline
- from groq import Groq
- from openai import OpenAI

- # Load Environment variables
- GROQ_API_KEY = os.getenv ("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")
-
- # Initialize groq client
- client = Groq(api_key=GROQ_API_KEY)
-
- # Load a fake news detection model from Hugging Face
+ # Load Fake News Detection Model from Hugging Face
  fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")

+ # Additional models (if available)
+ try:
+     alternative_model = pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")
+ except:
+     alternative_model = None  # Fallback in case of failure
+
  # Streamlit UI
  st.set_page_config(page_title="Fake News Detector", layout="wide")
  st.title("📰 Fake News Detector")

  # Tabs for Input and Results
- tab1, tab2, tab3 = st.tabs(["🔍 Input", "📊 Results", "🔗 Verified News"])
-
- # Function to fetch real news links (mocked for now)
- def fetch_real_news_links():
-     return ["https://www.bbc.com/news", "https://www.cnn.com", "https://www.reuters.com"]
-
+ tab1, tab2, tab3 = st.tabs(["Input", "Results", "Reliable News Sources"])

  with tab1:
      st.sidebar.title("Select Input Type")
-     option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])
+     option = st.sidebar.radio("Choose an option", ["Text"])

      if option == "Text":
          news_text = st.text_area("Enter the news content to check:", height=200)
@@ -44,51 +31,56 @@ with tab1:
              st.session_state["analyze"] = True
              st.experimental_rerun()

-     elif option == "Image":
-         uploaded_file = st.file_uploader("Upload an image of a news article", type=["jpg", "png", "jpeg"])
-         if uploaded_file is not None:
-             st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
-             st.info("🔍 Image analysis feature coming soon!")
-
-     elif option == "Video Link":
-         video_url = st.text_input("Enter a video news link to check")
-         if st.button("Analyze Video"):
-             if not video_url.strip():
-                 st.warning("Please enter a valid URL.")
-             else:
-                 st.info("🔍 Video analysis feature coming soon!")
-
  with tab2:
      if st.session_state.get("analyze", False):
          news_text = st.session_state.get("news_text", "")
          with st.spinner("Analyzing..."):
-             # Check using Groq API
-             chat_completion = client.chat.completions.create(
-                 messages=[{"role": "user", "content": f"Classify this news as Real or Fake: {news_text}"}],
-                 model="llama-3.3-70b-versatile",
-                 stream=False,
-             )
-             groq_result = chat_completion.choices[0].message.content.strip().lower()
-
              # Check using Hugging Face model
-             hf_result = fake_news_pipeline(news_text)[0]['label'].lower()
-
-             # Display result
-             if "fake" in groq_result or hf_result == "fake":
+             result = fake_news_pipeline(news_text)[0]['label'].lower()
+
+             # Alternative analysis if available
+             alternative_result = None
+             if alternative_model:
+                 alternative_result = alternative_model(news_text)[0]['label'].lower()
+
+             # Decision logic
+             if "fake" in result or (alternative_result and "fake" in alternative_result):
                  st.error("❌ This news is likely **Fake**!", icon="⚠️")
-                 conclusion = "The analysis suggests that this news might be fabricated or misleading. Please verify from credible sources."
-             elif "real" in groq_result or hf_result == "real":
+                 conclusion = "The analysis suggests that this news might be fabricated or misleading."
+             elif "real" in result or (alternative_result and "real" in alternative_result):
                  st.success("✅ This news is likely **Real**!", icon="✅")
                  conclusion = "The analysis indicates that this news appears to be credible and factual."
              else:
                  st.info("🤔 The result is uncertain. Please verify from trusted sources.")
-                 conclusion = "There is uncertainty in the classification. Further verification is recommended."
-
+                 conclusion = "Further verification is recommended."
+
              # Conclusion Section
              st.subheader("📌 Conclusion")
              st.write(conclusion)

  with tab3:
-     st.subheader("🔗 Verified News Sources")
-     for link in fetch_real_news_links():
+     st.subheader("🔗 Reliable News Sources")
+     reliable_sources = [
+         "https://www.bbc.com/news",
+         "https://www.cnn.com",
+         "https://www.reuters.com",
+         "https://www.nytimes.com",
+         "https://www.aljazeera.com",
+         "https://www.theguardian.com/international",
+         "https://www.washingtonpost.com",
+         "https://www.npr.org",
+         "https://www.apnews.com"
+     ]
+     for link in reliable_sources:
          st.markdown(f"[🔗 {link}]({link})")
+
+     st.subheader("📚 Open Datasets for Fake News Detection")
+     datasets = [
+         "https://huggingface.co/datasets/misinformation/fake_news",
+         "https://huggingface.co/datasets/liar_dataset",
+         "https://huggingface.co/datasets/news-category-dataset",
+         "https://huggingface.co/datasets/ccdv/realnews",
+         "https://huggingface.co/datasets/Shoonya-Data/fact-verification"
+     ]
+     for dataset in datasets:
+         st.markdown(f"[📂 {dataset}]({dataset})")
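For context, below is a minimal standalone sketch of the classification path introduced by this commit, runnable outside Streamlit. The helper name classify and the sample headline are illustrative only, and the label handling is an assumption: the mrm8488 detector is assumed to return labels that lowercase to "fake"/"real", while the nlptown fallback is a sentiment model whose labels are star ratings, so the substring checks are not expected to match it.

from transformers import pipeline

# Same two models that app.py loads after this commit.
fake_news_pipeline = pipeline(
    "text-classification",
    model="mrm8488/bert-tiny-finetuned-fake-news-detection",
)

try:
    # Fallback model from the commit; being a sentiment classifier, its
    # star-rating labels are assumed never to contain "fake" or "real".
    alternative_model = pipeline(
        "text-classification",
        model="nlptown/bert-base-multilingual-uncased-sentiment",
    )
except Exception:
    alternative_model = None  # same fallback behaviour as the app

def classify(news_text: str) -> str:
    # Mirrors the decision logic in the tab2 block of the new app.py.
    result = fake_news_pipeline(news_text)[0]["label"].lower()
    alternative_result = None
    if alternative_model:
        alternative_result = alternative_model(news_text)[0]["label"].lower()
    if "fake" in result or (alternative_result and "fake" in alternative_result):
        return "fake"
    if "real" in result or (alternative_result and "real" in alternative_result):
        return "real"
    return "uncertain"

if __name__ == "__main__":
    print(classify("Example headline to sanity-check the pipelines."))

If the star-rating assumption about the fallback holds, the verdict effectively comes from the mrm8488 model alone; the OR-checks only become meaningful if the alternative model is swapped for a second genuine fake-news classifier.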