# Fake News Detector — Streamlit app (Hugging Face Space).
# (Removed Hugging Face Spaces page residue — "Spaces: Sleeping" — that was
# accidentally captured above the source and broke the Python syntax.)
import streamlit as st
import os
# NOTE(review): the imports below (cv2, torch, torchaudio, torchvision,
# tensorflow, OpenAI) are never used in this script — confirm and prune.
import cv2
import torch
import torchaudio
import torchvision
import tensorflow as tf
from transformers import pipeline
from groq import Groq
from openai import OpenAI

# Set up the Groq client.
# BUG FIX: the original passed a literal (leaked!) API key as the *name* of an
# environment variable, so os.environ.get(...) always returned None. Read the
# key from a conventionally named environment variable instead, and never
# commit secrets to source control.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Load a small fake-news detection model from the Hugging Face hub.
fake_news_pipeline = pipeline(
    "text-classification",
    model="mrm8488/bert-tiny-finetuned-fake-news-detection",
)

# --- Streamlit UI ---
st.set_page_config(page_title="Fake News Detector", layout="centered")
st.title("\U0001F4F0 Fake News Detector")

# User input
news_text = st.text_area("Enter the news content to check:", height=200)

if st.button("Analyze News"):
    if not news_text.strip():
        st.warning("Please enter some text.")
    else:
        with st.spinner("Analyzing..."):
            # First opinion: ask the Groq-hosted LLM for a Real/Fake verdict.
            chat_completion = client.chat.completions.create(
                messages=[{"role": "user", "content": f"Classify this news as Real or Fake: {news_text}"}],
                model="llama-3.3-70b-versatile",
                stream=False,
            )
            groq_result = chat_completion.choices[0].message.content.strip().lower()

            # Second opinion: the local Hugging Face classifier.
            hf_result = fake_news_pipeline(news_text)[0]["label"].lower()

            # Final verdict: "fake" if either checker says fake, else "real"
            # if either says real, otherwise report uncertainty.
            # (Icon strings below were mojibake in the original; restored to
            # the intended emoji.)
            if "fake" in groq_result or hf_result == "fake":
                st.error("\u274C This news is likely **Fake**!", icon="\u26A0\uFE0F")
                st.markdown('<style>div.stAlert {background-color: #ffdddd;}</style>', unsafe_allow_html=True)
            elif "real" in groq_result or hf_result == "real":
                st.success("\u2705 This news is likely **Real**!", icon="\u2705")
                st.markdown('<style>div.stAlert {background-color: #ddffdd;}</style>', unsafe_allow_html=True)
            else:
                st.info("\U0001F914 The result is uncertain. Please verify from trusted sources.")