import streamlit as st
import requests
from transformers import pipeline
from deepface import DeepFace
from PIL import Image
import torch
import torchvision.transforms as transforms
import cv2
import numpy as np
from bs4 import BeautifulSoup
import re
from urllib.parse import quote_plus, urljoin

# Load Fake News Detection Model
fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
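# Optional: wrapping this load in a helper decorated with @st.cache_resource
# (available in recent Streamlit releases) would keep the model from being
# reloaded on every rerun of the script.
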
def classify_text(news_text):
    result = fake_news_pipeline(news_text)[0]
    label = result['label'].lower()
    score = result['score'] * 100  # Convert to percentage
    return ("Fake" if label == "fake" else "Real"), round(score, 2)
def analyze_image(image):
    # Heuristic: flag images whose dominant facial emotion is negative.
    try:
        result = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
        return "Fake" if result[0]['dominant_emotion'] in ['disgust', 'fear', 'sad'] else "Real"
    except Exception:
        return "Error in analyzing image."

def analyze_video(video_url):
    return "Video analysis feature coming soon!"
def verify_news(news_text):
    # Build a Google search URL so the user can verify the claim manually.
    search_url = f"https://www.google.com/search?q={quote_plus(news_text)}"
    return search_url

def scrape_verification_links(news_text):
    sources = [
        "https://www.bbc.com/news",
        "https://www.cnn.com",
        "https://www.reuters.com",
        "https://factcheck.org",
        "https://www.snopes.com",
        "https://www.politifact.com"
    ]
    verification_links = {}
    for source in sources:
        try:
            response = requests.get(source, timeout=5)
            soup = BeautifulSoup(response.text, 'html.parser')
            for link in soup.find_all('a', href=True):
                # Match links whose text contains the first few characters of the query.
                if re.search(re.escape(news_text[:5]), link.text, re.IGNORECASE):
                    # urljoin handles both relative and absolute hrefs.
                    verification_links[link.text] = urljoin(source, link['href'])
        except requests.RequestException:
            continue
    return verification_links
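# The result maps link text to an absolute URL, e.g. (hypothetical):
#   {"Fact check: example claim": "https://www.snopes.com/fact-check/example-claim/"}
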
# Streamlit UI
st.set_page_config(page_title="Fake News Detector", layout="wide")
st.title("📰 Fake News Detector")

# Sidebar for Input Selection
st.sidebar.title("Select Input Type")
option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])

# Input Section
if option == "Text":
    news_text = st.text_area("Enter the news content to check:", height=200)
    analyze_text_clicked = st.button("Analyze News")

    if analyze_text_clicked and news_text.strip():
        result, confidence = classify_text(news_text)
        verification_links = scrape_verification_links(news_text)

        st.subheader("🔍 Analysis Results")
        if result == "Fake":
            st.error(f"❌ This news is likely **Fake**! (Confidence: {confidence}%)")
        else:
            st.success(f"✅ This news is likely **Real**! (Confidence: {confidence}%)")

        st.subheader("🔗 Verification & Trusted Sources")
        for title, link in verification_links.items():
            st.markdown(f"[🔗 {title}]({link})")
        st.markdown(f"[🔍 Verify on Google]({verify_news(news_text)})")
elif option == "Image": | |
uploaded_image = st.file_uploader("Upload a news image", type=["jpg", "png", "jpeg"]) | |
analyze_image_clicked = st.button("Analyze Image") | |
if uploaded_image and analyze_image_clicked: | |
image = Image.open(uploaded_image) | |
st.image(image, caption="Uploaded Image", use_column_width=True) | |
result = analyze_image(np.array(image)) | |
st.error("β This image is likely **Fake**!") if result == "Fake" else st.success("β This image is likely **Real**!") | |
elif option == "Video Link": | |
video_url = st.text_input("Enter the video link:") | |
analyze_video_clicked = st.button("Analyze Video") | |
if analyze_video_clicked and video_url.strip(): | |
st.video(video_url) | |
result = analyze_video(video_url) | |
st.info(result) | |
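
# To run locally (assuming the dependencies imported above are installed,
# e.g. streamlit, transformers, torch, deepface, opencv-python, beautifulsoup4,
# pillow, requests, numpy), launch the script with the Streamlit CLI:
#   streamlit run app.py   # or whatever this file is named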