Spaces:
Running
Running
File size: 3,702 Bytes
62c311b a7f66d4 81a136d 1e42e10 c8d1c47 7be5b04 71c7fc8 7be5b04 71c7fc8 1e42e10 7be5b04 1e42e10 71c7fc8 7be5b04 71c7fc8 5bc427c 71c7fc8 5bc427c a7f66d4 71c7fc8 1e42e10 71c7fc8 ea6e24c 7be5b04 1e42e10 a7f66d4 1e42e10 7e55fba 1e42e10 7be5b04 71c7fc8 7be5b04 71c7fc8 1e42e10 a5b7aa4 7be5b04 71c7fc8 1e42e10 71c7fc8 1e42e10 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
import streamlit as st
import requests
from transformers import pipeline
from deepface import DeepFace
from PIL import Image
import torch
import torchvision.transforms as transforms
import cv2
import numpy as np
from bs4 import BeautifulSoup
import re
# Load Fake News Detection Model.
# Streamlit re-executes this script on every widget interaction, so a plain
# module-level pipeline(...) call would reload the model each rerun.
# st.cache_resource keeps one loaded pipeline alive for the whole process.
@st.cache_resource
def _load_fake_news_pipeline():
    """Load (once per process) the BERT-tiny fake-news classifier from the HF hub."""
    return pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
fake_news_pipeline = _load_fake_news_pipeline()
def classify_text(news_text):
    """Classify *news_text* with the fake-news model.

    Returns a tuple ``(verdict, confidence)`` where *verdict* is ``"Fake"``
    or ``"Real"`` and *confidence* is the model score as a percentage,
    rounded to two decimals.
    """
    prediction = fake_news_pipeline(news_text)[0]
    verdict = "Fake" if prediction['label'].lower() == "fake" else "Real"
    confidence = round(prediction['score'] * 100, 2)  # model score -> percent
    return verdict, confidence
def analyze_image(image):
    """Heuristically label an image as "Fake" or "Real" via DeepFace emotion analysis.

    NOTE(review): flagging images whose dominant emotion is disgust/fear/sad
    as "Fake" is a heuristic, not an actual deepfake detector — confirm intent.

    Returns "Fake", "Real", or an error message string if analysis fails.
    """
    try:
        result = DeepFace.analyze(image, actions=['emotion'], enforce_detection=False)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; analysis failures still degrade to a message string.
        return "Error in analyzing image."
    return "Fake" if result[0]['dominant_emotion'] in ['disgust', 'fear', 'sad'] else "Real"
def analyze_video(video_url):
    """Placeholder: video analysis is not implemented yet.

    Always returns the same coming-soon notice, regardless of *video_url*.
    """
    return "Video analysis feature coming soon!"
def verify_news(news_text):
    """Build a Google search URL for manually verifying *news_text*.

    The query is percent-encoded so characters such as ``&``, ``?``, ``#``
    or ``%`` in a headline cannot truncate or corrupt the URL (the previous
    whitespace-join left them raw). Runs of whitespace still collapse to a
    single ``+``, matching the old output for plain text.
    """
    from urllib.parse import quote_plus
    query = quote_plus(" ".join(news_text.split()))
    return f"https://www.google.com/search?q={query}"
def scrape_verification_links(news_text):
    """Scan a fixed list of trusted news / fact-check sites for anchor links
    whose text mentions the first few characters of *news_text*.

    Returns ``{anchor_text: absolute_url}``. Best-effort: a site that is
    down, slow, or returns an error status is skipped rather than aborting
    the whole scan.
    """
    from urllib.parse import urljoin
    sources = [
        "https://www.bbc.com/news",
        "https://www.cnn.com",
        "https://www.reuters.com",
        "https://factcheck.org",
        "https://www.snopes.com",
        "https://www.politifact.com",
    ]
    # Escape the probe: headline text is data, not a regex — unescaped
    # metacharacters ('(', '*', ...) would raise re.error on every link.
    probe = re.compile(re.escape(news_text[:5]), re.IGNORECASE)
    verification_links = {}
    for source in sources:
        try:
            # Timeout so one dead site can't hang the Streamlit UI forever.
            response = requests.get(source, timeout=10)
            response.raise_for_status()
        except requests.RequestException:
            continue  # best-effort: skip unreachable/erroring sources
        soup = BeautifulSoup(response.text, 'html.parser')
        for link in soup.find_all('a', href=True):
            if probe.search(link.text):
                # urljoin resolves relative AND absolute hrefs correctly;
                # naive `source + href` produced broken URLs for both.
                verification_links[link.text] = urljoin(source, link['href'])
    return verification_links
# Streamlit UI
st.set_page_config(page_title="Fake News Detector", layout="wide")
st.title("π° Fake News Detector")
# Sidebar for Input Selection
st.sidebar.title("Select Input Type")
option = st.sidebar.radio("Choose an option", ["Text", "Image", "Video Link"])
# Input Section — one branch per input type selected in the sidebar.
if option == "Text":
    news_text = st.text_area("Enter the news content to check:", height=200)
    analyze_text_clicked = st.button("Analyze News")
    if analyze_text_clicked and news_text.strip():
        result, accuracy = classify_text(news_text)
        verification_links = scrape_verification_links(news_text)
        st.subheader("π Analysis Results")
        # Plain if/else instead of a conditional-expression statement —
        # same behavior, idiomatic for side-effecting calls.
        if result == "Fake":
            st.error(f"β This news is likely **Fake**! (Accuracy: {accuracy}%)")
        else:
            st.success(f"β This news is likely **Real**! (Accuracy: {accuracy}%)")
        st.subheader("π Verification & Trusted Sources")
        for title, link in verification_links.items():
            st.markdown(f"[π {title}]({link})")
        st.markdown(f"[π Verify on Google]({verify_news(news_text)})")
elif option == "Image":
    uploaded_image = st.file_uploader("Upload a news image", type=["jpg", "png", "jpeg"])
    analyze_image_clicked = st.button("Analyze Image")
    if uploaded_image and analyze_image_clicked:
        image = Image.open(uploaded_image)
        st.image(image, caption="Uploaded Image", use_column_width=True)
        result = analyze_image(np.array(image))
        # analyze_image may also return an error string; anything other than
        # "Fake" falls through to the success banner, as before.
        if result == "Fake":
            st.error("β This image is likely **Fake**!")
        else:
            st.success("β This image is likely **Real**!")
elif option == "Video Link":
    video_url = st.text_input("Enter the video link:")
    analyze_video_clicked = st.button("Analyze Video")
    if analyze_video_clicked and video_url.strip():
        st.video(video_url)
        result = analyze_video(video_url)  # currently a coming-soon placeholder
        st.info(result)
|