import streamlit as st
import requests
from transformers import pipeline
from PIL import Image
import torch
import torchvision.transforms as transforms
import cv2
import numpy as np
from deepface import DeepFace
from bs4 import BeautifulSoup
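# Note (assumption): on a Hugging Face Space this app also needs a requirements.txt next to
# the script. A plausible dependency list, inferred from the imports above, would be:
#   streamlit, requests, transformers, torch, torchvision, opencv-python-headless,
#   numpy, deepface, beautifulsoup4, Pillow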
# Load Fake News Detection Model (Text)
fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
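# Note: Streamlit re-executes this script on every interaction, so the pipeline above is
# reloaded on each rerun; wrapping the load in a function decorated with @st.cache_resource
# would cache the model across reruns.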
# Function to classify text as Fake or Real
def classify_text(news_text):
    result = fake_news_pipeline(news_text)[0]
    label = result['label'].lower()
    score = result['score'] * 100  # Convert model confidence to a percentage
    return ("Fake" if label == "fake" else "Real"), round(score, 2)
# Function to analyze image authenticity (placeholder: runs DeepFace face analysis,
# it does not actually detect manipulation)
def analyze_image(image):
    try:
        image_array = np.array(image)
        result = DeepFace.analyze(image_array, actions=["age", "gender", "race"], enforce_detection=False)
        return ("Real" if result else "Fake"), 90  # Placeholder confidence score
    except Exception as e:
        return "Error", str(e)
# Function to verify news against open web sources via a Google search
def verify_news(news_text):
    search_url = f"https://www.google.com/search?q={'+'.join(news_text.split())}"
    headers = {"User-Agent": "Mozilla/5.0"}  # Google tends to block requests without a browser-like User-Agent
    response = requests.get(search_url, headers=headers, timeout=10)
    soup = BeautifulSoup(response.text, "html.parser")
    results = []
    for link in soup.find_all("a", href=True):
        if "http" in link["href"] and "google" not in link["href"]:
            results.append((link.text.strip(), link["href"]))
            if len(results) >= 3:  # Limit to 3 sources
                break
    return results
# Streamlit UI Configuration
st.set_page_config(page_title="Fake News Detector", layout="wide")
st.title("📰 Fake News Detector")
# Sidebar Input Selection
st.sidebar.title("Select Input Type")
option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])
# Session-state defaults so results persist across Streamlit reruns
if "result_text" not in st.session_state:
    st.session_state["result_text"] = None
if "accuracy_text" not in st.session_state:
    st.session_state["accuracy_text"] = None
if "result_image" not in st.session_state:
    st.session_state["result_image"] = None
if "accuracy_image" not in st.session_state:
    st.session_state["accuracy_image"] = None
if "video_result" not in st.session_state:
    st.session_state["video_result"] = None
# Input Section
if option == "Text":
    news_text = st.text_area("Enter the news content to check:", height=200)
    analyze_text_clicked = st.button("Analyze News")
    if analyze_text_clicked:
        if not news_text.strip():
            st.warning("Please enter some text.")
        else:
            result, accuracy = classify_text(news_text)
            st.session_state["result_text"] = result
            st.session_state["accuracy_text"] = accuracy
            verification_links = verify_news(news_text)
            st.session_state["verification_text"] = verification_links
elif option == "Image":
    uploaded_image = st.file_uploader("Upload a news image", type=["jpg", "png", "jpeg"])
    analyze_image_clicked = st.button("Analyze Image")
    if uploaded_image and analyze_image_clicked:
        image = Image.open(uploaded_image)
        result, accuracy = analyze_image(image)
        st.session_state["result_image"] = result
        st.session_state["accuracy_image"] = accuracy
elif option == "Video Link":
    video_url = st.text_input("Enter the video link:")
    analyze_video_clicked = st.button("Analyze Video")
    if analyze_video_clicked:
        if not video_url.strip():
            st.warning("Please enter a valid video link.")
        else:
            st.session_state["video_result"] = "Real"  # Placeholder result; real video verification would need dedicated models
# Results Section
st.subheader("📊 Analysis Results")
# Text Results
if st.session_state.get("result_text"):
result = st.session_state["result_text"]
accuracy = st.session_state["accuracy_text"]
st.subheader("π Text Analysis")
if result == "Fake":
st.error(f"β This news is likely **Fake**! (Accuracy: {accuracy}%)", icon="β οΈ")
else:
st.success(f"β
This news is likely **Real**! (Accuracy: {accuracy}%)", icon="β
")
st.subheader("π Verification & Trusted Sources")
sources = [
"https://www.bbc.com/news",
"https://www.cnn.com",
"https://www.reuters.com",
"https://factcheck.org",
"https://www.snopes.com",
"https://www.politifact.com"
]
for link in sources:
st.markdown(f"[π {link}]({link})")
if "verification_text" in st.session_state:
for name, link in st.session_state["verification_text"]:
st.markdown(f"[π {name}]({link})")
# Image Results
if st.session_state.get("result_image"):
result = st.session_state["result_image"]
accuracy = st.session_state["accuracy_image"]
st.subheader("πΌοΈ Image Analysis")
if result == "Fake":
st.error(f"β This image is likely **Fake**! (Accuracy: {accuracy}%)", icon="β οΈ")
else:
st.success(f"β
This image is likely **Real**! (Accuracy: {accuracy}%)", icon="β
")
# Video Results
if st.session_state.get("video_result"):
result = st.session_state["video_result"]
st.subheader("πΉ Video Analysis")
if result == "Fake":
st.error("β This video is likely **Fake**!", icon="β οΈ")
else:
st.success("β
This video is likely **Real**!", icon="β
")