# NOTE(review): the lines below were page-scrape residue from the hosting UI
# (status text, file size, commit hashes, and a line-number gutter), not
# source code. They are preserved here as a comment so the module parses.
#   Spaces: Running / File size: 3,741 Bytes / commit hashes / 1..92 gutter
import hashlib
import logging
import os
import subprocess
import sys
import time

from flask import Flask, render_template, request, Response, jsonify

from rss_processor import fetch_rss_feeds, process_and_store_articles, vector_db
# WSGI application object; route handlers below are registered against it.
app = Flask(__name__)
# Setup logging
logging.basicConfig(level=logging.INFO)
# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)
@app.route('/')
def loading():
    """Render the loading page and start feed ingestion in the background.

    Spawns ``rss_processor.py load_feeds`` as a detached subprocess so this
    request returns immediately; the loading page is expected to poll
    ``/check_feeds`` until the vector DB is populated.

    Returns:
        The rendered ``loading.html`` template.
    """
    # sys.executable guarantees the same interpreter/venv as this process;
    # a bare "python" would resolve via PATH and can pick the wrong one.
    # NOTE(review): every hit on '/' spawns a new loader process — presumably
    # the loader is idempotent; confirm against rss_processor.py.
    subprocess.Popen([sys.executable, "rss_processor.py", "load_feeds"])
    return render_template("loading.html")
@app.route('/check_feeds', methods=['GET'])
def check_feeds():
    """Poll endpoint reporting whether the vector DB has been populated.

    Returns:
        200 ``{"status": "loaded"}`` once at least one document is found,
        202 ``{"status": "loading"}`` while the DB is still empty, or
        500 ``{"status": "error", ...}`` if the lookup itself fails.
    """
    try:
        # A single-result probe query is enough to tell empty from populated.
        probe = vector_db.similarity_search("news", k=1)
        if not probe:
            return jsonify({"status": "loading"}), 202
        logger.info("Feeds loaded successfully in vector DB")
        return jsonify({"status": "loaded"})
    except Exception as exc:
        logger.error(f"Error checking feeds: {exc}")
        return jsonify({"status": "error", "message": str(exc)}), 500
def _dedupe_articles(docs):
    """Collapse duplicate documents into a list of article dicts.

    Two documents are considered the same article when title, link, and a
    short MD5 digest of the original description all match; the first
    occurrence wins.

    Args:
        docs: documents exposing a ``metadata`` mapping with keys
            ``title``, ``link``, ``original_description``, ``category``,
            ``published``, and optionally ``image``.

    Returns:
        List of unique article dicts in first-seen order.
    """
    unique = {}
    for doc in docs:
        title = doc.metadata["title"]
        link = doc.metadata["link"]
        description = doc.metadata["original_description"]
        # Short hash keeps the key compact; 10 hex chars is plenty to
        # distinguish descriptions that share title+link.
        desc_hash = hashlib.md5(description.encode()).hexdigest()[:10]
        key = f"{title}|{link}|{desc_hash}"
        if key not in unique:
            unique[key] = {
                "title": title,
                "link": link,
                "description": description,
                "category": doc.metadata["category"],
                "published": doc.metadata["published"],
                "image": doc.metadata.get("image", "svg"),
            }
    return list(unique.values())


# BUG FIX: the route was registered with methods=['GET'] while the body
# branches on request.method == 'POST', so the search form could never
# reach this handler (Flask would 405 the POST). Allow both methods.
@app.route('/index', methods=['GET', 'POST'])
def index():
    """Render the article index, optionally filtered by a search query.

    GET shows all stored articles; POST with a non-empty ``search`` form
    field replaces the listing with the top-10 similarity matches.

    Returns:
        The rendered ``index.html`` with articles grouped by category.
    """
    # Show existing articles while new feeds load in the background.
    stored_docs = vector_db.similarity_search("news", k=1000)
    enriched_articles = _dedupe_articles(stored_docs)
    logger.info(f"Enriched {len(enriched_articles)} unique articles for display")

    if request.method == 'POST' and 'search' in request.form:
        query = request.form.get('search')
        if query:
            logger.info(f"Processing search query: {query}")
            results = vector_db.similarity_search(query, k=10)
            enriched_articles = _dedupe_articles(results)
            logger.info(f"Search returned {len(enriched_articles)} unique results")

    # Group for the template: category -> list of articles.
    categorized_articles = {}
    for article in enriched_articles:
        categorized_articles.setdefault(article["category"], []).append(article)
    return render_template("index.html", categorized_articles=categorized_articles, loading_new_feeds=True)
if __name__ == "__main__":
    # BUG FIX: the original line ended with a stray " |" (scrape artifact),
    # which is a SyntaxError. Bind on all interfaces, port 7860 — presumably
    # a Hugging Face Spaces deployment; confirm against the hosting config.
    app.run(host="0.0.0.0", port=7860)