import requests
import gradio as gr
from bs4 import BeautifulSoup
import logging
from urllib.parse import urlparse
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from requests.exceptions import Timeout
from urllib.request import urlopen, Request
import json
from huggingface_hub import InferenceClient
import random
import time
from sentence_transformers import SentenceTransformer, util
import torch
import os
from dotenv import load_dotenv
import certifi
from newspaper import Article
import PyPDF2
import io
import datetime
from groq import Groq
import faiss
import numpy as np


current_year = datetime.datetime.now().year

load_dotenv()

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

SEARXNG_URL = 'https://shreyas094-searxng-local.hf.space/search'
SEARXNG_KEY = 'f9f07f93b37b8483aadb5ba717f556f3a4ac507b281b4ca01e6c6288aa3e3ae5'

HF_TOKEN = os.getenv("HF_TOKEN")
client = InferenceClient(
    "mistralai/Mistral-Nemo-Instruct-2407",
    token=HF_TOKEN,
)

GROQ_API_KEY = os.getenv("GROQ_API_KEY")
groq_client = Groq(api_key=GROQ_API_KEY)

# Sentence-embedding model used for relevance reranking, deduplication, and FAISS indexing
similarity_model = SentenceTransformer('all-MiniLM-L6-v2')

# Global FAISS index and the parallel list of documents it indexes
faiss_index = None
document_store = []

def requests_retry_session(
    retries=0,
    backoff_factor=0.1,
    status_forcelist=(500, 502, 504),
    session=None,
):
    session = session or requests.Session()
    retry = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session

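# Illustrative usage of the retry session (a minimal sketch; the URL below is a
# placeholder, not part of this app):
#     session = requests_retry_session(retries=3, backoff_factor=0.3)
#     response = session.get("https://example.com", timeout=5)
#     response.raise_for_status()
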
def is_valid_url(url):
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False

def scrape_pdf_content(url, max_chars=3000, timeout=5):
    try:
        logger.info(f"Scraping PDF content from: {url}")

        response = requests.get(url, timeout=timeout)
        response.raise_for_status()

        pdf_reader = PyPDF2.PdfReader(io.BytesIO(response.content))

        content = ""
        for page in pdf_reader.pages:
            # extract_text() can return None for pages without extractable text
            content += (page.extract_text() or "") + "\n"

        return content[:max_chars] if content else ""
    except requests.Timeout:
        logger.error(f"Timeout error while scraping PDF content from {url}")
        return ""
    except Exception as e:
        logger.error(f"Error scraping PDF content from {url}: {e}")
        return ""

def scrape_with_newspaper(url):
    if url.lower().endswith('.pdf'):
        return scrape_pdf_content(url)

    logger.info(f"Starting to scrape with Newspaper3k: {url}")
    try:
        article = Article(url)
        article.download()
        article.parse()

        content = f"Title: {article.title}\n\n"
        content += article.text

        if article.publish_date:
            content += f"\n\nPublish Date: {article.publish_date}"

        if article.authors:
            content += f"\n\nAuthors: {', '.join(article.authors)}"

        if article.top_image:
            content += f"\n\nTop Image URL: {article.top_image}"

        return content
    except Exception as e:
        logger.error(f"Error scraping {url} with Newspaper3k: {e}")
        return ""

def rephrase_query(chat_history, query, temperature=0.2):
    system_prompt = f"""
    You are a highly intelligent and context-aware conversational assistant. Your tasks are as follows:

    1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.

    2. For both continuations and new topics:
       a. **Entity Identification and Quotation**:
          - Analyze the user's query to identify the main entities (e.g., organizations, brands, products, locations).
          - For each identified entity, enclose ONLY the entity itself in double quotes within the query.
          - If no identifiable entities are found, proceed without adding quotes.
       b. **Query Preservation**:
          - Maintain the entire original query, including any parts after commas or other punctuation.
          - Do not remove or truncate any part of the original query.

    3. If it's a continuation:
       - Incorporate relevant information from the context to make the query more specific and contextual.
       - Ensure that entities from the previous context are properly quoted if they appear in the rephrased query.

    4. For both continuations and new topics:
       - Append "after: {current_year}" to the end of the rephrased query.
       - Ensure there is a space before "after:" for proper formatting.
       - Do not use quotes or the "+" operator when adding the year.

    5. **Output**:
       - Return ONLY the rephrased query, ensuring it is concise, clear, and contextually accurate.
       - Do not include any additional commentary or explanation.

    ### Example Scenarios
    **Scenario 1: New Topic**
    - **User Query**: "What is the latest news on Golomt Bank?"
    - **Rephrased Query**: "What is the latest news on \"Golomt Bank\" after: {current_year}"

    **Scenario 2: Continuation**
    - **Previous Query**: "What is the latest news on Golomt Bank?"
    - **User Query**: "How did the Bank perform in Q2 2024?"
    - **Rephrased Query**: "How did \"Golomt Bank\" perform in Q2 2024 after: {current_year}"

    **Scenario 3: Query with Multiple Entities and Comma**
    - **User Query**: "What is the latest news about Prospect Capital, did the rating change?"
    - **Rephrased Query**: "What is the latest news about \"Prospect Capital\", did the rating change after: {current_year}"

    **Scenario 4: Query Without Recognizable Entities**
    - **User Query**: "How does photosynthesis work?"
    - **Rephrased Query**: "How does photosynthesis work? after: {current_year}"
    """
    user_prompt = f"""
    Conversation context:
    {chat_history}
    New query: {query}
    Rephrased query:
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]
    try:
        logger.info(f"Sending rephrasing request to LLM with temperature {temperature}")
        response = client.chat_completion(
            messages=messages,
            max_tokens=150,
            temperature=temperature
        )
        logger.info("Received rephrased query from LLM")
        rephrased_question = response.choices[0].message.content.strip()

        if (rephrased_question.startswith('"') and rephrased_question.endswith('"')) or \
           (rephrased_question.startswith("'") and rephrased_question.endswith("'")):
            rephrased_question = rephrased_question[1:-1].strip()
        logger.info(f"Rephrased Query (cleaned): {rephrased_question}")
        return rephrased_question
    except Exception as e:
        logger.error(f"Error rephrasing query with LLM: {e}")
        return query

def rerank_documents(query, documents, similarity_threshold=0.95, max_results=5):
    try:
        query_embedding = similarity_model.encode(query, convert_to_tensor=True)
        doc_summaries = [doc['summary'] for doc in documents]

        if not doc_summaries:
            logger.warning("No document summaries to rerank.")
            return documents

        doc_embeddings = similarity_model.encode(doc_summaries, convert_to_tensor=True)

        cosine_scores = util.cos_sim(query_embedding, doc_embeddings)[0]

        scored_documents = list(zip(documents, cosine_scores))

        scored_documents.sort(key=lambda x: x[1], reverse=True)

        filtered_docs = []
        for doc, score in scored_documents:
            if score < 0.5:
                continue

            is_similar = False
            for selected_doc in filtered_docs:
                similarity = util.pytorch_cos_sim(
                    similarity_model.encode(doc['summary'], convert_to_tensor=True),
                    similarity_model.encode(selected_doc['summary'], convert_to_tensor=True)
                )
                if similarity > similarity_threshold:
                    is_similar = True
                    break

            if not is_similar:
                filtered_docs.append(doc)

            if len(filtered_docs) >= max_results:
                break

        logger.info(f"Reranked and filtered to {len(filtered_docs)} unique documents.")
        return filtered_docs
    except Exception as e:
        logger.error(f"Error during reranking documents: {e}")
        return documents[:max_results]

def compute_similarity(text1, text2):
    embedding1 = similarity_model.encode(text1, convert_to_tensor=True)
    embedding2 = similarity_model.encode(text2, convert_to_tensor=True)

    cosine_similarity = util.pytorch_cos_sim(embedding1, embedding2)

    return cosine_similarity.item()


def is_content_unique(new_content, existing_contents, similarity_threshold=0.8):
    for existing_content in existing_contents:
        similarity = compute_similarity(new_content, existing_content)
        if similarity > similarity_threshold:
            return False
    return True

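# Illustrative usage of the deduplication helpers (a minimal sketch with made-up strings;
# exact scores depend on the embedding model, so the comments are indicative only):
#     compute_similarity("Golomt Bank reports record Q2 profit",
#                        "Golomt Bank posts record profit for Q2")      # high score
#     summaries = ["Golomt Bank reports record Q2 profit"]
#     is_content_unique("Golomt Bank posts record profit for Q2", summaries)  # likely False at 0.8
#     is_content_unique("Central bank raises interest rates", summaries)      # likely True
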
def assess_relevance_and_summarize(llm_client, query, document, temperature=0.2):
    system_prompt = """You are a world-class AI assistant specializing in financial news analysis. Your task is to assess the relevance of a given document to a user's query and provide a detailed summary if it's relevant."""

    # Only the first 1000 characters of the document are sent, for efficiency
    user_prompt = f"""
    Query: {query}

    Document Title: {document['title']}
    Document Content:
    {document['content'][:1000]}

    Instructions:
    1. Assess if the document is relevant to the QUERY made by the user.
    2. If relevant, provide a detailed summary that captures the unique aspects of this particular news item. Include:
       - Key facts and figures
       - Dates of events or announcements
       - Names of important entities mentioned
       - Any financial metrics or changes reported
       - The potential impact or significance of the news
    3. If not relevant, simply state "Not relevant".

    Your response should be in the following format:
    Relevant: [Yes/No]
    Summary: [Your detailed summary if relevant, or "Not relevant" if not]

    Remember to focus on financial aspects and implications in your assessment and summary. Aim to make the summary distinctive, highlighting what makes this particular news item unique compared to similar news.
    """

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        response = llm_client.chat_completion(
            messages=messages,
            max_tokens=300,
            temperature=temperature,
            top_p=0.9,
            frequency_penalty=1.4
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.error(f"Error assessing relevance and summarizing with LLM: {e}")
        return "Error: Unable to assess relevance and summarize"

def scrape_full_content(url, scraper="bs4", max_chars=3000, timeout=5):
    try:
        logger.info(f"Scraping full content from: {url}")

        if url.lower().endswith('.pdf'):
            return scrape_pdf_content(url, max_chars, timeout)

        if scraper == "bs4":
            session = requests_retry_session()
            response = session.get(url, timeout=timeout)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')

            # Prefer the main article container when one can be identified
            main_content = soup.find('main') or soup.find('article') or soup.find('div', class_='content')

            if main_content:
                content = main_content.get_text(strip=True, separator='\n')
            else:
                content = soup.get_text(strip=True, separator='\n')
        elif scraper == "trafilatura":
            # Requires a scrape_with_trafilatura helper, which is not defined in this file
            content = scrape_with_trafilatura(url, max_chars, timeout, use_beautifulsoup=True)
        elif scraper == "scrapy":
            # Requires a scrape_with_scrapy helper, which is not defined in this file
            content = scrape_with_scrapy(url, timeout)
        elif scraper == "newspaper":
            content = scrape_with_newspaper(url)
        else:
            logger.error(f"Unknown scraper: {scraper}")
            return ""

        return content[:max_chars] if content else ""
    except requests.Timeout:
        logger.error(f"Timeout error while scraping full content from {url}")
        return ""
    except Exception as e:
        logger.error(f"Error scraping full content from {url}: {e}")
        return ""

def llm_summarize(json_input, model, temperature=0.2):
    system_prompt = """You are Sentinel, a world-class Financial analysis AI model who is expert at searching the web and answering user's queries. You are also an expert at summarizing web pages or documents and searching for content in them."""

    user_prompt = f"""
    Please provide a comprehensive summary based on the following JSON input:
    {json_input}

    Instructions:
    1. Analyze the query and the provided documents.
    2. Write a detailed, long, and complete research document that is informative and relevant to the user's query.
    3. Use an unbiased and professional tone in your response.
    4. Do not repeat text verbatim from the input.
    5. Provide the answer in the response itself.
    6. You can use markdown to format your response.
    7. Use bullet points to list information where appropriate.
    8. Cite the answer using [number] notation along with the appropriate source URL embedded in the notation.
    9. Place these citations at the end of the relevant sentences.
    10. You can cite the same sentence multiple times if it's relevant to different parts of your answer.

    Your response should be detailed, informative, accurate, and directly relevant to the user's query."""

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt}
    ]

    try:
        if model == "groq":
            response = groq_client.chat.completions.create(
                messages=messages,
                model="llama-3.1-8b-instant",
                max_tokens=5500,
                temperature=temperature,
                top_p=0.9,
                presence_penalty=1.2,
                stream=False
            )
            return response.choices[0].message.content.strip()
        else:
            response = client.chat_completion(
                messages=messages,
                max_tokens=10000,
                temperature=temperature,
                frequency_penalty=1.4,
                top_p=0.9
            )
            return response.choices[0].message.content.strip()
    except Exception as e:
        logger.error(f"Error in LLM summarization: {e}")
        return "Error: Unable to generate a summary. Please try again."

def create_or_reset_faiss_index(dimension=384):
    # 384 is the embedding size of the all-MiniLM-L6-v2 model loaded above
    global faiss_index
    faiss_index = faiss.IndexFlatL2(dimension)


def add_documents_to_faiss(documents):
    global faiss_index, document_store

    document_store.clear()

    embeddings = []
    for doc in documents:
        # Embed the title plus the first 500 characters of the content
        text_to_embed = f"{doc['title']} {doc['content'][:500]}"
        embedding = similarity_model.encode(text_to_embed)
        embeddings.append(embedding)
        document_store.append(doc)

    embeddings_array = np.array(embeddings).astype('float32')

    faiss_index.add(embeddings_array)


def search_similar_documents(query, k=5):
    global faiss_index, document_store

    query_embedding = similarity_model.encode(query)
    query_embedding = np.array([query_embedding]).astype('float32')

    distances, indices = faiss_index.search(query_embedding, k)

    # FAISS pads the result with -1 when fewer than k documents are indexed
    similar_docs = [document_store[i] for i in indices[0] if 0 <= i < len(document_store)]

    return similar_docs

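# Illustrative end-to-end use of the FAISS helpers (a minimal sketch with made-up documents;
# each document needs at least 'title' and 'content' keys):
#     create_or_reset_faiss_index()
#     add_documents_to_faiss([
#         {"title": "Example A", "content": "Sample financial news text..."},
#         {"title": "Example B", "content": "Another sample article..."},
#     ])
#     search_similar_documents("sample query", k=1)
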
def search_and_scrape(query, chat_history, num_results=5, max_chars=3000, time_range="", language="all", category="",
                      engines=[], safesearch=2, method="GET", llm_temperature=0.2, timeout=5, model="huggingface"):
    try:
        rephrased_query = rephrase_query(chat_history, query, temperature=llm_temperature)
        logger.info(f"Rephrased Query: {rephrased_query}")

        if not rephrased_query or rephrased_query.lower() == "not_needed":
            logger.info("No need to perform search based on the rephrased query.")
            return "No search needed for the provided input."

        params = {
            'q': rephrased_query,
            'format': 'json',
            'time_range': time_range,
            'language': language,
            'category': category,
            'engines': ','.join(engines),
            'safesearch': safesearch
        }

        params = {k: v for k, v in params.items() if v != ""}

        if 'engines' not in params:
            params['engines'] = 'google'
            logger.info("No engines specified. Defaulting to 'google'.")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://shreyas094-searxng-local.hf.space',
            'Referer': 'https://shreyas094-searxng-local.hf.space/',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
        }

        scraped_content = []
        page = 1
        while len(scraped_content) < num_results:
            params['pageno'] = page

            logger.info(f"Sending request to SearXNG for query: {rephrased_query} (Page {page})")
            session = requests_retry_session()

            try:
                if method.upper() == "GET":
                    response = session.get(SEARXNG_URL, params=params, headers=headers, timeout=10, verify=certifi.where())
                else:
                    response = session.post(SEARXNG_URL, data=params, headers=headers, timeout=10, verify=certifi.where())

                response.raise_for_status()
            except requests.exceptions.RequestException as e:
                logger.error(f"Error during SearXNG request: {e}")
                return f"An error occurred during the search request: {e}"

            search_results = response.json()
            logger.debug(f"SearXNG Response: {search_results}")

            results = search_results.get('results', [])
            if not results:
                logger.warning(f"No more results returned from SearXNG on page {page}.")
                break

            for result in results:
                if len(scraped_content) >= num_results:
                    break

                url = result.get('url', '')
                title = result.get('title', 'No title')

                if not is_valid_url(url):
                    logger.warning(f"Invalid URL: {url}")
                    continue

                try:
                    logger.info(f"Processing content from: {url}")

                    # Use keyword arguments so max_chars is not mistaken for the scraper name
                    content = scrape_full_content(url, scraper="newspaper", max_chars=max_chars, timeout=timeout)

                    if not content:
                        logger.warning(f"Failed to scrape content from {url}")
                        continue

                    scraped_content.append({
                        "title": title,
                        "url": url,
                        "content": content,
                        "scraper": "pdf" if url.lower().endswith('.pdf') else "newspaper"
                    })
                    logger.info(f"Successfully scraped content from {url}. Total scraped: {len(scraped_content)}")
                except requests.exceptions.RequestException as e:
                    logger.error(f"Error scraping {url}: {e}")
                except Exception as e:
                    logger.error(f"Unexpected error while scraping {url}: {e}")

            page += 1

        if not scraped_content:
            logger.warning("No content scraped from search results.")
            return "No content could be scraped from the search results."

        logger.info(f"Successfully scraped {len(scraped_content)} documents.")

        relevant_documents = []
        unique_summaries = []
        for doc in scraped_content:
            assessment = assess_relevance_and_summarize(client, rephrased_query, doc, temperature=llm_temperature)
            # Guard against single-line responses that lack a summary section
            parts = assessment.split('\n', 1)
            relevance = parts[0]
            summary = parts[1] if len(parts) > 1 else ""

            if relevance.strip().lower() == "relevant: yes":
                summary_text = summary.replace("Summary: ", "").strip()

                if is_content_unique(summary_text, unique_summaries):
                    relevant_documents.append({
                        "title": doc['title'],
                        "url": doc['url'],
                        "summary": summary_text,
                        "scraper": doc['scraper'],
                        "content": doc['content']  # keep the scraped text for indexing and the final summary prompt
                    })
                    unique_summaries.append(summary_text)
                else:
                    logger.info(f"Skipping similar content: {doc['title']}")

        if not relevant_documents:
            logger.warning("No relevant and unique documents found.")
            return "No relevant and unique financial news found for the given query."

        reranked_docs = rerank_documents(rephrased_query, relevant_documents, similarity_threshold=0.95, max_results=num_results)

        if not reranked_docs:
            logger.warning("No documents remained after reranking.")
            return "No relevant financial news found after filtering and ranking."

        logger.info(f"Reranked and filtered to top {len(reranked_docs)} unique, finance-related documents.")

        create_or_reset_faiss_index()

        add_documents_to_faiss(reranked_docs[:num_results])

        similar_docs = search_similar_documents(query, k=num_results)

        llm_input = {
            "query": query,
            "documents": [
                {
                    "title": doc['title'],
                    "url": doc['url'],
                    "summary": doc['summary'],
                    "full_content": doc['content']
                } for doc in reranked_docs[:num_results]
            ],
            "similar_documents": [
                {
                    "title": doc['title'],
                    "url": doc['url'],
                    "content": doc['content'][:500]
                } for doc in similar_docs
            ]
        }

        llm_summary = llm_summarize(json.dumps(llm_input), model, temperature=llm_temperature)

        return llm_summary

    except Exception as e:
        logger.error(f"Unexpected error in search_and_scrape: {e}")
        return f"An unexpected error occurred during the search and scrape process: {e}"

def chat_function(message, history, num_results, max_chars, time_range, language, category, engines, safesearch, method, llm_temperature, model):
    # history is a list of (user, assistant) message pairs
    chat_history = "\n".join([f"User: {user_msg}\nAssistant: {assistant_msg}" for user_msg, assistant_msg in history])

    response = search_and_scrape(
        query=message,
        chat_history=chat_history,
        num_results=num_results,
        max_chars=max_chars,
        time_range=time_range,
        language=language,
        category=category,
        engines=engines,
        safesearch=safesearch,
        method=method,
        llm_temperature=llm_temperature,
        model=model
    )

    yield response

iface = gr.ChatInterface(
    chat_function,
    title="Web Scraper for Financial News",
    description="Enter your query, and I'll search the web for the most recent and relevant financial news, scrape content, and provide summarized results.",
    theme=gr.Theme.from_hub("allenai/gradio-theme"),
    additional_inputs=[
        gr.Slider(5, 20, value=10, step=1, label="Number of initial results"),
        gr.Slider(500, 10000, value=1500, step=100, label="Max characters to retrieve"),
        gr.Dropdown(["", "day", "week", "month", "year"], value="", label="Time Range"),
        gr.Dropdown(["", "all", "en", "fr", "de", "es", "it", "nl", "pt", "pl", "ru", "zh"], value="", label="Language"),
        gr.Dropdown(["", "general", "news", "images", "videos", "music", "files", "it", "science", "social media"], value="", label="Category"),
        gr.Dropdown(
            ["google", "bing", "duckduckgo", "baidu", "yahoo", "qwant", "startpage"],
            multiselect=True,
            value=["google", "duckduckgo", "bing", "qwant"],
            label="Engines"
        ),
        gr.Slider(0, 2, value=2, step=1, label="Safe Search Level"),
        gr.Radio(["GET", "POST"], value="POST", label="HTTP Method"),
        gr.Slider(0, 1, value=0.2, step=0.1, label="LLM Temperature"),
        gr.Dropdown(["huggingface", "groq"], value="huggingface", label="LLM Model"),
    ],
    additional_inputs_accordion=gr.Accordion("⚙️ Advanced Parameters", open=True),
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    chatbot=gr.Chatbot(
        show_copy_button=True,
        likeable=True,
        layout="bubble",
        height=500,
    )
)


if __name__ == "__main__":
    logger.info("Starting the SearXNG Scraper for Financial News using ChatInterface with Advanced Parameters")
    iface.launch(share=True)