import os
import threading
from flask import Flask, render_template, request, jsonify
from rss_processor import fetch_rss_feeds, process_and_store_articles, vector_db, download_from_hf_hub, upload_to_hf_hub, clean_text
import logging
import time
from datetime import datetime
import hashlib

app = Flask(__name__)
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global flags to track background loading
loading_complete = False
last_update_time = time.time()
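
# load_feeds_in_background() runs in a daemon thread started from index();
# loading_complete and last_update_time let the polling endpoints further
# down (paths there are assumptions) report progress back to the page.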
def load_feeds_in_background():
    global loading_complete, last_update_time
    try:
        logger.info("Starting background RSS feed fetch")
        articles = fetch_rss_feeds()
        logger.info(f"Fetched {len(articles)} articles")
        process_and_store_articles(articles)
        last_update_time = time.time()
        logger.info("Background feed processing complete")
        upload_to_hf_hub()
        loading_complete = True
    except Exception as e:
        logger.error(f"Error in background feed loading: {e}")
        loading_complete = True
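
# index() serves the main page: it restores the Chroma DB from the Hub on a
# cold start, kicks off the background fetch, and renders whatever articles
# are already stored, deduplicated and grouped by category.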
@app.route('/')  # assumed path; this handler clearly serves the main page
def index():
    global loading_complete, last_update_time
    db_exists = os.path.exists("chroma_db") and vector_db.get().get('documents')
    if not db_exists:
        loading_complete = False
        logger.info("Downloading Chroma DB from Hugging Face Hub...")
        download_from_hf_hub()
        threading.Thread(target=load_feeds_in_background, daemon=True).start()
    elif not loading_complete:
        pass  # a background load is already in flight; render what we have
    else:
        loading_complete = True

    try:
        all_docs = vector_db.get(include=['documents', 'metadatas'])
        total_docs = len(all_docs['documents']) if all_docs.get('documents') else 0
        logger.info(f"Total articles in DB: {total_docs}")
        if not all_docs.get('metadatas'):
            logger.info("No articles in DB yet")
            return render_template("index.html", categorized_articles={}, has_articles=False, loading=not loading_complete)
        # Process and categorize articles with strict deduplication
        enriched_articles = []
        seen_keys = set()
        for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
            if not meta:
                continue
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            # Clean and normalize all fields
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            # Use a robust key built from the cleaned fields plus a description hash
            description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
            key = f"{title}|{link}|{published}|{description_hash}"
            if key not in seen_keys:
                seen_keys.add(key)
                try:
                    published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
                except (ValueError, TypeError):
                    published = "1970-01-01T00:00:00"
                enriched_articles.append({
                    "title": title,
                    "link": link,
                    "description": description,
                    "category": meta.get("category", "Uncategorized"),
                    "published": published,
                    "image": meta.get("image", "svg"),
                })
            else:
                logger.debug(f"Duplicate found in retrieval: {key}")
        # Sort by published date (stable sort)
        enriched_articles.sort(key=lambda x: x["published"], reverse=True)

        # Group by category
        categorized_articles = {}
        for article in enriched_articles:
            cat = article["category"]
            if cat not in categorized_articles:
                categorized_articles[cat] = []
            categorized_articles[cat].append(article)

        # Sort categories alphabetically
        categorized_articles = dict(sorted(categorized_articles.items(), key=lambda x: x[0].lower()))

        # Limit to the 10 most recent per category and log the top 2 for debugging
        for cat in categorized_articles:
            categorized_articles[cat] = sorted(categorized_articles[cat], key=lambda x: x["published"], reverse=True)[:10]
            if len(categorized_articles[cat]) >= 2:
                logger.debug(f"Category {cat} top 2: {categorized_articles[cat][0]['title']} | {categorized_articles[cat][1]['title']}")

        logger.info(f"Displaying articles: {sum(len(articles) for articles in categorized_articles.values())} total")
        return render_template("index.html",
                               categorized_articles=categorized_articles,
                               has_articles=True,
                               loading=not loading_complete)
    except Exception as e:
        logger.error(f"Error retrieving articles: {e}")
        return render_template("index.html", categorized_articles={}, has_articles=False, loading=not loading_complete)
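
# search() answers the page's search box: it runs a vector similarity query
# against the Chroma store and returns deduplicated results as JSON, grouped
# by category.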
@app.route('/search', methods=['POST'])  # assumed path; the handler reads a form field, so POST
def search():
    query = request.form.get('search')
    if not query:
        logger.info("Empty search query received")
        return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})
    try:
        logger.info(f"Searching for: {query}")
        results = vector_db.similarity_search(query, k=10)
        logger.info(f"Search returned {len(results)} results")

        enriched_articles = []
        seen_keys = set()
        for doc in results:
            meta = doc.metadata
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            # Clean and normalize all fields
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
            key = f"{title}|{link}|{published}|{description_hash}"
            if key not in seen_keys:
                seen_keys.add(key)
                enriched_articles.append({
                    "title": title,
                    "link": link,
                    "description": description,
                    "category": meta.get("category", "Uncategorized"),
                    "published": published,
                    "image": meta.get("image", "svg"),
                })

        categorized_articles = {}
        for article in enriched_articles:
            cat = article["category"]
            categorized_articles.setdefault(cat, []).append(article)

        logger.info(f"Found {len(enriched_articles)} unique articles across {len(categorized_articles)} categories")
        return jsonify({
            "categorized_articles": categorized_articles,
            "has_articles": bool(enriched_articles),
            "loading": False
        })
    except Exception as e:
        logger.error(f"Search error: {e}")
        return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False}), 500
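
# check_loading() is a lightweight polling endpoint: 202 while the background
# fetch is still running, 200 with the last update timestamp once it finishes.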
@app.route('/check_loading')  # assumed path
def check_loading():
    global loading_complete, last_update_time
    if loading_complete:
        return jsonify({"status": "complete", "last_update": last_update_time})
    return jsonify({"status": "loading"}), 202
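
# get_updates() re-reads the whole store and returns the current articles as
# JSON so the page can refresh in place once the background fetch completes.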
@app.route('/get_updates')  # assumed path
def get_updates():
    global last_update_time
    try:
        all_docs = vector_db.get(include=['documents', 'metadatas'])
        if not all_docs.get('metadatas'):
            return jsonify({"articles": [], "last_update": last_update_time})

        enriched_articles = []
        seen_keys = set()
        for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
            if not meta:
                continue
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            # Clean and normalize all fields
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
            key = f"{title}|{link}|{published}|{description_hash}"
            if key not in seen_keys:
                seen_keys.add(key)
                try:
                    published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
                except (ValueError, TypeError):
                    published = "1970-01-01T00:00:00"
                enriched_articles.append({
                    "title": title,
                    "link": link,
                    "description": description,
                    "category": meta.get("category", "Uncategorized"),
                    "published": published,
                    "image": meta.get("image", "svg"),
                })

        enriched_articles.sort(key=lambda x: x["published"], reverse=True)

        # Group by category, skipping any article already present in its category
        categorized_articles = {}
        for article in enriched_articles:
            cat = article["category"]
            if cat not in categorized_articles:
                categorized_articles[cat] = []
            key = f"{article['title']}|{article['link']}|{article['published']}"
            if key not in {f"{a['title']}|{a['link']}|{a['published']}" for a in categorized_articles[cat]}:
                categorized_articles[cat].append(article)

        # Limit to the 10 most recent per category with a final deduplication pass
        for cat in categorized_articles:
            unique_articles = []
            seen_cat_keys = set()
            for article in sorted(categorized_articles[cat], key=lambda x: x["published"], reverse=True):
                key = f"{clean_text(article['title'])}|{clean_text(article['link'])}|{article['published']}"
                if key not in seen_cat_keys:
                    seen_cat_keys.add(key)
                    unique_articles.append(article)
            categorized_articles[cat] = unique_articles[:10]

        return jsonify({"articles": categorized_articles, "last_update": last_update_time})
    except Exception as e:
        logger.error(f"Error fetching updates: {e}")
        return jsonify({"articles": {}, "last_update": last_update_time}), 500
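
# get_all_articles() returns every stored article for one category, without
# the 10-item cap, for an expanded per-category view.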
@app.route('/get_all_articles/<category>')  # assumed path; the handler takes a category parameter
def get_all_articles(category):
    try:
        all_docs = vector_db.get(include=['documents', 'metadatas'])
        if not all_docs.get('metadatas'):
            return jsonify({"articles": [], "category": category})

        enriched_articles = []
        seen_keys = set()
        for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
            if not meta or meta.get("category") != category:
                continue
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            # Clean and normalize all fields
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
            key = f"{title}|{link}|{published}|{description_hash}"
            if key not in seen_keys:
                seen_keys.add(key)
                try:
                    published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
                except (ValueError, TypeError):
                    published = "1970-01-01T00:00:00"
                enriched_articles.append({
                    "title": title,
                    "link": link,
                    "description": description,
                    "category": meta.get("category", "Uncategorized"),
                    "published": published,
                    "image": meta.get("image", "svg"),
                })

        enriched_articles.sort(key=lambda x: x["published"], reverse=True)
        return jsonify({"articles": enriched_articles, "category": category})
    except Exception as e:
        logger.error(f"Error fetching all articles for category {category}: {e}")
        return jsonify({"articles": [], "category": category}), 500
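
# card_load() renders the card.html partial used for individual article cards.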
@app.route('/card_load')  # assumed path, matched to the function name
def card_load():
    return render_template("card.html")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
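
# Example session against the assumed routes (7860 is the standard port for
# Hugging Face Spaces):
#   curl -s localhost:7860/check_loading          # {"status": "loading"} until done
#   curl -s localhost:7860/get_updates            # current articles as JSON
#   curl -s -X POST -d "search=ai" localhost:7860/search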