import os
import subprocess
import logging
import time
import hashlib

from flask import Flask, render_template, request, Response, jsonify

from rss_processor import fetch_rss_feeds, process_and_store_articles, vector_db

app = Flask(__name__)

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def _dedupe_articles(docs):
    """Collapse vector-DB documents into a list of unique article dicts.

    Uniqueness key is ``title|link|md5(description)[:10]`` so the same story
    syndicated with an identical title/link/description appears only once.
    First occurrence wins.
    """
    unique = {}
    for doc in docs:
        title = doc.metadata["title"]
        link = doc.metadata["link"]
        description = doc.metadata["original_description"]
        # md5 here is a short content fingerprint, not a security hash.
        desc_hash = hashlib.md5(description.encode()).hexdigest()[:10]
        key = f"{title}|{link}|{desc_hash}"
        if key not in unique:
            unique[key] = {
                "title": title,
                "link": link,
                "description": description,
                "category": doc.metadata["category"],
                "published": doc.metadata["published"],
                "image": doc.metadata.get("image", "svg"),
            }
    return list(unique.values())


@app.route('/')
def loading():
    """Landing page: kick off feed ingestion and show a loading screen.

    The ingestion runs in a detached subprocess so this request returns
    immediately; the client polls /check_feeds for completion.
    """
    # Start loading feeds in a subprocess
    subprocess.Popen(["python", "rss_processor.py", "load_feeds"])
    return render_template("loading.html")


@app.route('/check_feeds', methods=['GET'])
def check_feeds():
    """Poll endpoint: report whether the vector DB contains any documents.

    Returns 200 {"status": "loaded"} once a probe query finds at least one
    document, 202 {"status": "loading"} while empty, 500 on error.
    """
    try:
        # A single-result probe query is enough to tell empty from populated.
        docs = vector_db.similarity_search("news", k=1)
        if docs:
            logger.info("Feeds loaded successfully in vector DB")
            return jsonify({"status": "loaded"})
        return jsonify({"status": "loading"}), 202
    except Exception as e:
        logger.error(f"Error checking feeds: {e}")
        return jsonify({"status": "error", "message": str(e)}), 500


# BUG FIX: this route was registered with methods=['GET'] while its body
# branched on request.method == 'POST' for the search form — Flask answered
# every POST with 405, making the search branch unreachable. 'POST' is now
# accepted; GET behavior is unchanged.
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Render articles grouped by category; on POST, show search results.

    GET: dedupe and display up to 1000 stored articles.
    POST with a non-empty 'search' field: replace the listing with the top-10
    similarity-search matches for the query.
    """
    # Show existing articles while new feeds load in background
    stored_docs = vector_db.similarity_search("news", k=1000)  # Show all available articles
    enriched_articles = _dedupe_articles(stored_docs)
    logger.info(f"Enriched {len(enriched_articles)} unique articles for display")

    if request.method == 'POST' and 'search' in request.form:
        query = request.form.get('search')
        if query:
            logger.info(f"Processing search query: {query}")
            results = vector_db.similarity_search(query, k=10)
            enriched_articles = _dedupe_articles(results)
            logger.info(f"Search returned {len(enriched_articles)} unique results")

    # Group the (possibly search-filtered) articles by category for the template.
    categorized_articles = {}
    for article in enriched_articles:
        categorized_articles.setdefault(article["category"], []).append(article)

    return render_template(
        "index.html",
        categorized_articles=categorized_articles,
        loading_new_feeds=True,
    )


if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=5000)