Update app.py
app.py CHANGED
@@ -1,7 +1,6 @@
 import streamlit as st
 import requests
 import torch
-import torchvision.transforms as transforms
 from transformers import pipeline
 from deepface import DeepFace
 from PIL import Image
@@ -10,29 +9,25 @@ import numpy as np
 from bs4 import BeautifulSoup
 
 # Load Fake News Detection Models
-
-
-
+fake_news_pipeline = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
+sentiment_pipeline = pipeline("sentiment-analysis", model="nlptown/bert-base-multilingual-uncased-sentiment")
+ai_detector_pipeline = pipeline("text-classification", model="roberta-base-openai-detector")
 
 def classify_text(news_text):
-
-
-
+    fake_news_result = fake_news_pipeline(news_text)[0]
+    sentiment_result = sentiment_pipeline(news_text)[0]
+    ai_result = ai_detector_pipeline(news_text)[0]
 
-
-
-
-    }
+    fake_label = fake_news_result['label'].lower()
+    sentiment_label = sentiment_result['label'].lower()
+    ai_label = ai_result['label'].lower()
 
-
-
-
-        scores["Fake" if "fake" in label else "Real"] += score
+    fake_score = fake_news_result['score'] * 100
+    sentiment_score = sentiment_result['score'] * 100
+    ai_score = ai_result['score'] * 100
 
-    final_label = "Fake" if
-
-
-    return final_label, final_score
+    final_label = "Fake" if fake_label == "fake" or ai_label == "ai-generated" else "Real"
+    return final_label, round(fake_score, 2), round(sentiment_score, 2), round(ai_score, 2)
 
 def analyze_image(image):
     try:
@@ -61,14 +56,14 @@ def analyze_video(video_path):
     return "Error: " + str(e)
 
 def verify_news(news_text):
-    search_url = f"https://www.google.com/search?q={'
+    search_url = f"https://www.google.com/search?q={'%20'.join(news_text.split())}"
     sources = [
-        f"https://www.bbc.co.uk/search?q={'
-        f"https://www.cnn.com/search?q={'
-        f"https://www.reuters.com/search/news?blob={'
-        f"https://www.factcheck.org/?s={'
-        f"https://www.snopes.com/?s={'
-        f"https://www.politifact.com/search/?q={'
+        f"https://www.bbc.co.uk/search?q={'%20'.join(news_text.split())}",
+        f"https://www.cnn.com/search?q={'%20'.join(news_text.split())}",
+        f"https://www.reuters.com/search/news?blob={'%20'.join(news_text.split())}",
+        f"https://www.factcheck.org/?s={'%20'.join(news_text.split())}",
+        f"https://www.snopes.com/?s={'%20'.join(news_text.split())}",
+        f"https://www.politifact.com/search/?q={'%20'.join(news_text.split())}"
     ]
    return search_url, sources
 
@@ -82,9 +77,12 @@ with col1:
     news_text = st.text_area("Enter the news content to check:", height=200)
     if st.button("Analyze News", key="text_analyze"):
         if news_text.strip():
-            result,
+            result, fake_acc, sentiment_acc, ai_acc = classify_text(news_text)
             verification_link, sources = verify_news(news_text)
-            st.write(f"**Result:** {result}
+            st.write(f"**Result:** {result}")
+            st.write(f"**Fake News Score:** {fake_acc}%")
+            st.write(f"**Sentiment Score:** {sentiment_acc}%")
+            st.write(f"**AI Detection Score:** {ai_acc}%")
             st.markdown(f"[Verify on Google]({verification_link})")
             for source in sources:
                 st.markdown(f"[Check Source]({source})")
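
For context on what classify_text now unpacks: a transformers text-classification or sentiment-analysis pipeline returns a list with one dict per input, holding 'label' and 'score' keys. The sketch below is illustrative only and not part of this commit; it just calls one of the models named in the diff directly.

# Illustrative sketch, not part of this commit: inspect the raw output that
# classify_text unpacks. Each pipeline call returns a list of dicts with
# 'label' and 'score' keys.
from transformers import pipeline

fake_news_pipeline = pipeline(
    "text-classification",
    model="mrm8488/bert-tiny-finetuned-fake-news-detection",
)

result = fake_news_pipeline("Sample headline to check.")[0]
print(result["label"], round(result["score"] * 100, 2))
# classify_text then lowercases the labels and compares them against "fake"
# (and, for the detector model, "ai-generated") to choose "Fake" or "Real";
# the exact label strings depend on each model's configured id2label mapping.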
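A side note on the query strings built in verify_news: the '%20'.join(news_text.split()) idiom only replaces whitespace. The standard library's urllib.parse.quote_plus escapes all reserved characters; the snippet below is an illustrative alternative, not part of the commit.

# Illustrative alternative, not part of this commit: quote_plus percent-encodes
# every reserved character in the query, not just spaces.
from urllib.parse import quote_plus

news_text = 'Breaking: 100% of cats are "liquid"?'
search_url = f"https://www.google.com/search?q={quote_plus(news_text)}"
print(search_url)
# https://www.google.com/search?q=Breaking%3A+100%25+of+cats+are+%22liquid%22%3F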