# grok_test / rss_processor.py
import os
import logging

import feedparser
from huggingface_hub import HfApi, login
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Hugging Face setup
HF_API_TOKEN = os.getenv("DEMO_HF_API_TOKEN", "YOUR_HF_API_TOKEN")
REPO_ID = "broadfield-dev/news-rag-db"
LOCAL_DB_DIR = "chroma_db"
# Authenticate with the Hugging Face Hub so the HfApi calls below can write to the dataset repo
login(token=HF_API_TOKEN)
hf_api = HfApi()
# RSS feeds
RSS_FEEDS = [
"https://www.sciencedaily.com/rss/top/science.xml",
"https://www.horoscope.com/us/horoscopes/general/rss/horoscope-rss.aspx",
"http://rss.cnn.com/rss/cnn_allpolitics.rss",
"https://phys.org/rss-feed/physics-news/",
"https://www.spaceweatherlive.com/en/news/rss",
"https://weather.com/feeds/rss",
"https://www.wired.com/feed/rss",
"https://www.nasa.gov/rss/dyn/breaking_news.rss",
"https://www.nationalgeographic.com/feed/",
"https://www.nature.com/nature.rss",
"https://www.scientificamerican.com/rss/",
"https://www.newscientist.com/feed/home/",
"https://www.livescience.com/feeds/all",
"https://astrostyle.com/feed/",
"https://www.vogue.com/feed/rss",
"https://feeds.bbci.co.uk/news/politics/rss.xml",
"https://www.reuters.com/arc/outboundfeeds/newsletter-politics/?outputType=xml",
"https://www.politico.com/rss/politics.xml",
"https://thehill.com/feed/",
"https://www.aps.org/publications/apsnews/updates/rss.cfm",
"https://www.quantamagazine.org/feed/",
"https://www.sciencedaily.com/rss/matter_energy/physics.xml",
"https://physicsworld.com/feed/",
"https://www.swpc.noaa.gov/rss.xml",
"https://feeds.bbci.co.uk/weather/feeds/rss/5day/world/",
"https://www.weather.gov/rss",
"https://www.foxweather.com/rss",
"https://techcrunch.com/feed/",
"https://arstechnica.com/feed/",
"https://gizmodo.com/rss",
"https://www.theverge.com/rss/index.xml",
"https://www.space.com/feeds/all",
"https://www.universetoday.com/feed/",
"https://skyandtelescope.org/feed/",
"https://www.esa.int/rss",
"https://www.smithsonianmag.com/rss/",
"https://www.popsci.com/rss.xml",
"https://www.discovermagazine.com/rss",
"https://www.atlasobscura.com/feeds/latest"
]
# Embedding model and vector DB
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector_db = Chroma(persist_directory=LOCAL_DB_DIR, embedding_function=embedding_model)
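# Note: all-MiniLM-L6-v2 produces 384-dimensional sentence embeddings; Chroma
# reuses any existing index found in LOCAL_DB_DIR, so repeated runs build on
# the same on-disk collection rather than starting fresh.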
def fetch_rss_feeds():
articles = []
for feed_url in RSS_FEEDS:
try:
logger.info(f"Fetching feed: {feed_url}")
feed = feedparser.parse(feed_url)
            # feedparser sets `bozo` on malformed XML, but entries are often
            # still usable; skip the feed only when nothing could be parsed.
            if feed.bozo and not feed.entries:
                logger.warning(f"Failed to parse {feed_url}: {feed.bozo_exception}")
                continue
            for entry in feed.entries[:5]:
                # Prefer media_content, then media_thumbnail; guard against an
                # empty list, which would raise IndexError on [0].
                media = entry.get("media_content") or entry.get("media_thumbnail") or [{}]
                image = media[0].get("url")
articles.append({
"title": entry.get("title", "No Title"),
"link": entry.get("link", ""),
"description": entry.get("summary", entry.get("description", "No Description")),
"published": entry.get("published", "Unknown Date"),
"category": categorize_feed(feed_url),
"image": image,
})
logger.info(f"Processed {len(feed.entries[:5])} entries from {feed_url}")
except Exception as e:
logger.error(f"Error fetching {feed_url}: {e}")
return articles
def categorize_feed(url):
if "sciencedaily" in url or "phys.org" in url:
return "Science & Physics"
elif "horoscope" in url:
return "Astrology"
elif "politics" in url:
return "Politics"
elif "spaceweather" in url or "nasa" in url:
return "Solar & Space"
elif "weather" in url:
return "Earth Weather"
else:
return "Cool Stuff"
def process_and_store_articles(articles):
documents = []
for article in articles:
try:
doc = Document(
page_content=article["description"],
metadata={
"title": article["title"],
"link": article["link"],
"original_description": article["description"],
"published": article["published"],
"category": article["category"],
"image": article["image"],
}
)
documents.append(doc)
except Exception as e:
logger.error(f"Error processing article {article['title']}: {e}")
    if documents:
        vector_db.add_documents(documents)
        vector_db.persist()
        logger.info("Vector DB persisted")
upload_to_hf_hub()
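
# Illustrative sketch (not part of the original pipeline): how the stored
# articles could be queried back out of the Chroma DB. The helper name
# `search_articles` and the k=5 default are assumptions for demonstration.
def search_articles(query, k=5):
    # similarity_search embeds the query with the same MiniLM model and
    # returns the k nearest Documents with their metadata intact.
    results = vector_db.similarity_search(query, k=k)
    return [
        {
            "title": doc.metadata.get("title"),
            "link": doc.metadata.get("link"),
            "published": doc.metadata.get("published"),
            "snippet": doc.page_content[:200],
        }
        for doc in results
    ]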
def upload_to_hf_hub():
if os.path.exists(LOCAL_DB_DIR):
try:
hf_api.create_repo(repo_id=REPO_ID, repo_type="dataset", exist_ok=True, token=HF_API_TOKEN)
logger.info(f"Repository {REPO_ID} created or exists.")
except Exception as e:
logger.error(f"Error creating repo: {e}")
return
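        # Files are uploaded one at a time below; alternatively,
        # hf_api.upload_folder(folder_path=LOCAL_DB_DIR, repo_id=REPO_ID,
        # repo_type="dataset") would push the whole directory in a single call.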
for root, _, files in os.walk(LOCAL_DB_DIR):
for file in files:
local_path = os.path.join(root, file)
                # Hub paths use forward slashes; normalize for portability.
                remote_path = os.path.relpath(local_path, LOCAL_DB_DIR).replace(os.sep, "/")
try:
hf_api.upload_file(
path_or_fileobj=local_path,
path_in_repo=remote_path,
repo_id=REPO_ID,
repo_type="dataset",
token=HF_API_TOKEN
)
logger.info(f"Uploaded {file} to {REPO_ID}")
except Exception as e:
logger.error(f"Error uploading file {file}: {e}")
logger.info(f"Database uploaded to: {REPO_ID}")