import requests
import gradio as gr
from bs4 import BeautifulSoup
import logging
from urllib.parse import urlparse
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from trafilatura import fetch_url, extract
from trafilatura.settings import use_config
from urllib.request import urlopen, Request
import json
from huggingface_hub import InferenceClient
import random
import time
from sentence_transformers import SentenceTransformer, util
import torch
from datetime import datetime
import os
from dotenv import load_dotenv
import certifi

# Load environment variables from a .env file
load_dotenv()

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# SearXNG instance details
SEARXNG_URL = 'https://shreyas094-searxng-local.hf.space/search'
SEARXNG_KEY = 'f9f07f93b37b8483aadb5ba717f556f3a4ac507b281b4ca01e6c6288aa3e3ae5'

# Use the environment variable
HF_TOKEN = os.getenv('HF_TOKEN')

client = InferenceClient(
    "mistralai/Mistral-Nemo-Instruct-2407",
    token=HF_TOKEN,
)

# Initialize the similarity model
similarity_model = SentenceTransformer('all-MiniLM-L6-v2')

# Set up a session with retry mechanism
def requests_retry_session(
    retries=0,
    backoff_factor=0.1,
    status_forcelist=(500, 502, 504),
    session=None,
):
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session

def is_valid_url(url):
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False

def scrape_with_bs4(url, session):
    try:
        response = session.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        main_content = soup.find('main') or soup.find('article') or soup.find('div', class_='content')
        if main_content:
            content = main_content.get_text(strip=True)
        else:
            content = soup.get_text(strip=True)

        return content
    except Exception as e:
        logger.error(f"Error scraping {url} with BeautifulSoup: {e}")
        return ""

def scrape_with_trafilatura(url):
    try:
        downloaded = fetch_url(url)
        content = extract(downloaded)
        return content or ""
    except Exception as e:
        logger.error(f"Error scraping {url} with Trafilatura: {e}")
        return ""

def rephrase_query(chat_history, query, temperature=0.2):
    system_prompt = """You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:

1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
4. Provide ONLY the rephrased query without any additional explanation or reasoning."""
    user_prompt = f"""
Context:
{chat_history}

New query: {query}

Rephrased query:
"""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        logger.info(f"Sending rephrasing request to LLM with temperature {temperature}")
        response = client.chat_completion(
            messages=messages,
            max_tokens=150,
            temperature=temperature
        )
        logger.info("Received rephrased query from LLM")
        rephrased_question = response.choices[0].message.content.strip()

        # Remove surrounding quotes if present
        if (rephrased_question.startswith('"') and rephrased_question.endswith('"')) or \
           (rephrased_question.startswith("'") and rephrased_question.endswith("'")):
            rephrased_question = rephrased_question[1:-1].strip()

        logger.info(f"Rephrased Query (cleaned): {rephrased_question}")
        return rephrased_question
    except Exception as e:
        logger.error(f"Error rephrasing query with LLM: {e}")
        return query  # Fallback to original query if rephrasing fails

def rerank_documents(query, documents):
    try:
        # Step 1: Encode the query and document summaries
        query_embedding = similarity_model.encode(query, convert_to_tensor=True)
        doc_summaries = [doc['summary'] for doc in documents]

        if not doc_summaries:
            logger.warning("No document summaries to rerank.")
            return documents  # Return original documents if there's nothing to rerank

        doc_embeddings = similarity_model.encode(doc_summaries, convert_to_tensor=True)

        # Step 2: Compute cosine similarity
        cosine_scores = util.cos_sim(query_embedding, doc_embeddings)[0]

        # Step 3: Compute dot-product similarity
        dot_product_scores = torch.matmul(query_embedding, doc_embeddings.T)

        # Ensure dot_product_scores is a 1-D tensor
        if dot_product_scores.dim() == 0:
            dot_product_scores = dot_product_scores.unsqueeze(0)

        # Combine documents, cosine scores, and dot product scores
        scored_documents = list(zip(documents, cosine_scores, dot_product_scores))

        # Step 4: Sort documents by cosine similarity score
        scored_documents.sort(key=lambda x: x[1], reverse=True)

        # Step 5: Return only the top 5 documents
        reranked_docs = [doc[0] for doc in scored_documents[:5]]
        logger.info(f"Reranked to top {len(reranked_docs)} documents.")
        return reranked_docs
    except Exception as e:
        logger.error(f"Error during reranking documents: {e}")
        return documents[:5]  # Fallback to first 5 documents if reranking fails

def compute_similarity(text1, text2):
    # Encode the texts
    embedding1 = similarity_model.encode(text1, convert_to_tensor=True)
    embedding2 = similarity_model.encode(text2, convert_to_tensor=True)

    # Compute cosine similarity
    cosine_similarity = util.pytorch_cos_sim(embedding1, embedding2)
    return cosine_similarity.item()

def is_content_unique(new_content, existing_contents, similarity_threshold=0.8):
    for existing_content in existing_contents:
        similarity = compute_similarity(new_content, existing_content)
        if similarity > similarity_threshold:
            return False
    return True

def assess_relevance_and_summarize(llm_client, query, document, temperature=0.2):
    system_prompt = """You are a financial analyst AI assistant. Your task is to assess whether the given text is relevant to the user's query from a financial perspective and provide a brief summary if it is relevant."""

    user_prompt = f"""
Query: {query}

Document Content:
{document['content']}

Instructions:
1. Assess if the document is relevant to the query from a financial analyst's perspective.
2. If relevant, summarize the main points in 1-2 sentences.
3. If not relevant, simply state "Not relevant".

Your response should be in the following format:
Relevant: [Yes/No]
Summary: [Your 1-2 sentence summary if relevant, or "Not relevant" if not]

Remember to focus on financial aspects and implications in your assessment and summary.
"""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        response = llm_client.chat_completion(
            messages=messages,
            max_tokens=150,
            temperature=temperature
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.error(f"Error assessing relevance and summarizing with LLM: {e}")
        return "Error: Unable to assess relevance and summarize"

def scrape_full_content(url, scraper="trafilatura", max_chars=3000):
    try:
        logger.info(f"Scraping full content from: {url}")

        if scraper == "bs4":
            session = requests_retry_session()
            response = session.get(url, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')

            # Try to find the main content
            main_content = soup.find('main') or soup.find('article') or soup.find('div', class_='content')
            if main_content:
                content = main_content.get_text(strip=True, separator='\n')
            else:
                content = soup.get_text(strip=True, separator='\n')
        else:  # trafilatura
            downloaded = fetch_url(url)
            content = extract(downloaded, include_comments=False, include_tables=True, no_fallback=False)

        # Limit the content to max_chars
        return content[:max_chars] if content else ""
    except Exception as e:
        logger.error(f"Error scraping full content from {url}: {e}")
        return ""

def llm_summarize(query, documents, llm_client, temperature=0.2):
    system_prompt = """You are Sentinel, a world-class financial analysis AI model that is expert at searching the web and answering users' queries. You are also an expert at summarizing web pages or documents and searching for content in them."""

    # Prepare the context from the documents
    context = "\n\n".join([
        f"Document {i+1}:\nTitle: {doc['title']}\nURL: {doc['url']}\n(SCRAPED CONTENT)\n{doc['full_content']}\n(/SCRAPED CONTENT)"
        for i, doc in enumerate(documents)
    ])

    user_prompt = f"""
Query: {query}

Context:
{context}

Instructions:
Write a detailed, long and complete research document that is informative and relevant to the query of the user, who is a financial analyst, based on the provided context (the context consists of search results containing a brief description of the content of each page). You must use this context to answer the user's query in the best way possible. Use an unbiased, journalistic tone in your response. Do not repeat the text. You must provide the answer in the response itself. If the user asks for links, you can provide them. If the user asks to summarize content from some links, you will be provided the entire content of the page inside the (SCRAPED CONTENT) block; you can then use this content to summarize the text. Your responses should be detailed, informative, accurate, and relevant to the user's query. You can use markdown to format your response, and you should use bullet points to list information. Make sure the answer is long and written in a research-document style.
You have to cite the answer using [number] notation along with the appropriate source URL embedded in the notation. You must cite the sentences with their relevant context number. You must cite each and every part of the answer so the user can know where the information is coming from. Place these citations at the end of that particular sentence.
You can cite the same sentence multiple times if it is relevant to the user's query, like [number1][number2]. However, you do not need to cite it using the same number; you can use different numbers to cite the same sentence multiple times. The number refers to the number of the search result (passed in the context) used to generate that part of the answer.
Anything inside the (SCRAPED CONTENT) blocks provided above is for your knowledge, returned by the search engine, and is not shared by the user. You have to answer the question on the basis of it and cite the relevant information from it, but you do not have to talk about the context in your response. If you think there is nothing relevant in the search results, you can say 'Hmm, sorry I could not find any relevant information on this topic. Would you like me to search again or ask something else?'. You do not need to do this for summarization tasks. Anything between the (SCRAPED CONTENT) markers is retrieved from a search engine and is not part of the conversation with the user.

Please provide a comprehensive summary based on the above instructions:
"""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        response = llm_client.chat_completion(
            messages=messages,
            max_tokens=5000,
            temperature=temperature
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.error(f"Error in LLM summarization: {e}")
        return "Error: Unable to generate a summary. Please try again."

def search_and_scrape(query, chat_history, num_results=5, scraper="trafilatura", max_chars=3000, time_range="", language="all",
                      category="", engines=[], safesearch=2, method="GET", llm_temperature=0.2):
    try:
        # Step 1: Rephrase the query
        rephrased_query = rephrase_query(chat_history, query, temperature=llm_temperature)
        logger.info(f"Rephrased Query: {rephrased_query}")

        if not rephrased_query or rephrased_query.lower() == "not_needed":
            logger.info("No need to perform search based on the rephrased query.")
            return "No search needed for the provided input."

        # Step 2: Query SearXNG
        # Search query parameters
        params = {
            'q': rephrased_query,
            'format': 'json',
            'num_results': num_results,
            'time_range': time_range,
            'language': language,
            'category': category,
            'engines': ','.join(engines),
            'safesearch': safesearch
        }

        # Remove empty parameters
        params = {k: v for k, v in params.items() if v != ""}

        # If no engines are specified, set default engines
        if 'engines' not in params:
            params['engines'] = 'google'  # Default to 'google' or any preferred engine
            logger.info("No engines specified. Defaulting to 'google'.")
        # Headers for SearXNG request
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://shreyas094-searxng-local.hf.space',
            'Referer': 'https://shreyas094-searxng-local.hf.space/',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
        }

        # Send request to SearXNG
        logger.info(f"Sending request to SearXNG for query: {rephrased_query}")
        session = requests_retry_session()

        try:
            if method.upper() == "GET":
                response = session.get(SEARXNG_URL, params=params, headers=headers, timeout=10, verify=certifi.where())
            else:  # POST
                response = session.post(SEARXNG_URL, data=params, headers=headers, timeout=10, verify=certifi.where())
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            logger.error(f"Error during SearXNG request: {e}")
            return f"An error occurred during the search request: {e}"

        search_results = response.json()
        logger.debug(f"SearXNG Response: {search_results}")

        num_received = len(search_results.get('results', []))
        logger.info(f"Received {num_received} results from SearXNG")

        if num_received == 0:
            logger.warning("No results returned from SearXNG.")
            return "No results found for the given query."

        # Scrape an initial snippet of content from each result URL
        scraped_content = []
        for result in search_results.get('results', [])[:num_results]:
            url = result.get('url', '')
            title = result.get('title', 'No title')

            if not is_valid_url(url):
                logger.warning(f"Invalid URL: {url}")
                continue

            try:
                logger.info(f"Scraping content from: {url}")

                # Implement a retry mechanism with different user agents
                user_agents = [
                    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15',
                    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
                ]

                content = ""
                for ua in user_agents:
                    try:
                        if scraper == "bs4":
                            session.headers.update({'User-Agent': ua})
                            content = scrape_with_bs4(url, session)
                        else:  # trafilatura
                            # Use urllib to handle custom headers for trafilatura
                            req = Request(url, headers={'User-Agent': ua})
                            with urlopen(req) as page_response:
                                downloaded = page_response.read()

                            # Configure trafilatura to use a specific user agent
                            config = use_config()
                            config.set("DEFAULT", "USER_AGENT", ua)

                            content = extract(downloaded, config=config)

                        if content:
                            break
                    except requests.exceptions.HTTPError as e:
                        if e.response.status_code == 403:
                            logger.warning(f"403 Forbidden error with User-Agent: {ua}. Trying next...")
                            continue
                        else:
                            raise
                    except Exception as e:
                        logger.error(f"Error scraping {url} with User-Agent {ua}: {str(e)}")
                        continue

                if not content:
                    logger.warning(f"Failed to scrape content from {url} after trying multiple User-Agents")
                    continue

                # Limit content to max_chars
                scraped_content.append({
                    "title": title,
                    "url": url,
                    "content": content[:max_chars],
                    "scraper": scraper
                })
            except requests.exceptions.RequestException as e:
                logger.error(f"Error scraping {url}: {e}")
            except Exception as e:
                logger.error(f"Unexpected error while scraping {url}: {e}")

        if not scraped_content:
            logger.warning("No content scraped from search results.")
            return "No content could be scraped from the search results."
        # Step 3: Assess relevance, summarize, and check for uniqueness
        relevant_documents = []
        unique_summaries = []
        for doc in scraped_content:
            assessment = assess_relevance_and_summarize(client, rephrased_query, doc, temperature=llm_temperature)
            relevance, summary = assessment.split('\n', 1)

            if relevance.strip().lower() == "relevant: yes":
                summary_text = summary.replace("Summary: ", "").strip()
                if is_content_unique(summary_text, unique_summaries):
                    relevant_documents.append({
                        "title": doc['title'],
                        "url": doc['url'],
                        "summary": summary_text,
                        "scraper": doc['scraper']
                    })
                    unique_summaries.append(summary_text)
                else:
                    logger.info(f"Skipping similar content: {doc['title']}")

        if not relevant_documents:
            logger.warning("No relevant and unique documents found.")
            return "No relevant and unique financial news found for the given query."

        # Step 4: Rerank documents based on similarity to query
        reranked_docs = rerank_documents(rephrased_query, relevant_documents)

        if not reranked_docs:
            logger.warning("No documents remained after reranking.")
            return "No relevant financial news found after filtering and ranking."

        logger.info(f"Reranked and filtered to top {len(reranked_docs)} unique, finance-related documents.")

        # Step 5: Scrape full content for top 5 documents
        for doc in reranked_docs[:5]:
            full_content = scrape_full_content(doc['url'], scraper, max_chars)
            doc['full_content'] = full_content

        # Step 6: LLM summarization
        llm_summary = llm_summarize(query, reranked_docs[:5], client, temperature=llm_temperature)

        return llm_summary

    except Exception as e:
        logger.error(f"Unexpected error in search_and_scrape: {e}")
        return f"An unexpected error occurred during the search and scrape process: {e}"

def chat_function(message, history, num_results, scraper, max_chars, time_range, language, category, engines, safesearch, method, llm_temperature):
    # ChatInterface passes history as (user_message, assistant_message) pairs,
    # so label both sides when flattening it into a plain-text context string.
    chat_history = "\n".join([f"Human: {human}\nAssistant: {assistant}" for human, assistant in history])

    response = search_and_scrape(
        query=message,
        chat_history=chat_history,
        num_results=num_results,
        scraper=scraper,
        max_chars=max_chars,
        time_range=time_range,
        language=language,
        category=category,
        engines=engines,
        safesearch=safesearch,
        method=method,
        llm_temperature=llm_temperature
    )

    yield response

iface = gr.ChatInterface(
    chat_function,
    title="SearXNG Scraper for Financial News",
    description="Enter your query, and I'll search the web for the most recent and relevant financial news, scrape content, and provide summarized results.",
    additional_inputs=[
        gr.Slider(5, 20, value=10, step=1, label="Number of initial results"),
        gr.Dropdown(["bs4", "trafilatura"], value="trafilatura", label="Scraping Method"),
        gr.Slider(500, 10000, value=1500, step=100, label="Max characters to retrieve"),
        gr.Dropdown(["", "day", "week", "month", "year"], value="year", label="Time Range"),
        gr.Dropdown(["all", "en", "fr", "de", "es", "it", "nl", "pt", "pl", "ru", "zh"], value="en", label="Language"),
        gr.Dropdown(["", "general", "news", "images", "videos", "music", "files", "it", "science", "social media"], value="", label="Category"),
        gr.Dropdown(
            ["google", "bing", "duckduckgo", "baidu", "yahoo", "qwant", "startpage"],
            multiselect=True,
            value=["google", "duckduckgo"],
            label="Engines"
        ),
        gr.Slider(0, 2, value=2, step=1, label="Safe Search Level"),
        gr.Radio(["GET", "POST"], value="POST", label="HTTP Method"),
        gr.Slider(0, 1, value=0.2, step=0.1, label="LLM Temperature"),
    ],
    additional_inputs_accordion=gr.Accordion("⚙️ Advanced Parameters", open=True),
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    chatbot=gr.Chatbot(
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        height=400,
    )
)

if __name__ == "__main__":
    logger.info("Starting the SearXNG Scraper for Financial News using ChatInterface with Advanced Parameters")
    iface.launch(share=True)
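
# --- Usage sketch (illustrative, not part of the original app flow) ---
# A minimal sketch of driving the pipeline directly, without the Gradio UI.
# It assumes HF_TOKEN is available in the environment or .env, that the SearXNG
# instance configured above is reachable, and that Gradio is a 4.x release
# (retry_btn/undo_btn/clear_btn and likeable= appear to have been removed in
# later versions). The example query text is purely hypothetical. To try it,
# comment out the iface.launch() block above and uncomment the lines below.
#
# if __name__ == "__main__":
#     print(search_and_scrape(
#         query="Latest Federal Reserve interest rate decision",  # example query (assumption)
#         chat_history="",
#         num_results=5,
#         scraper="trafilatura",
#         max_chars=3000,
#         engines=["google", "duckduckgo"],
#         method="POST",
#         llm_temperature=0.2,
#     ))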